Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
From the class TestHiveMetaStoreSchemaMethods, method addSchemaVersionOtherDb:
@Test
public void addSchemaVersionOtherDb() throws TException {
String dbName = "other_db_for_schema_version";
Database db = new DatabaseBuilder().setName(dbName).build();
client.createDatabase(db);
String schemaName = uniqueSchemaName();
int version = 1;
ISchema schema = new ISchemaBuilder().setDbName(dbName).setSchemaType(SchemaType.AVRO).setName(schemaName).build();
client.createISchema(schema);
String description = "very descriptive";
String schemaText = "this should look like json, but oh well";
String fingerprint = "this should be an md5 string";
String versionName = "why would I name a version?";
long creationTime = 10;
String serdeName = "serde_for_schema37";
String serializer = "org.apache.hadoop.hive.metastore.test.Serializer";
String deserializer = "org.apache.hadoop.hive.metastore.test.Deserializer";
String serdeDescription = "how do you describe a serde?";
SchemaVersion schemaVersion = new SchemaVersionBuilder()
    .versionOf(schema)
    .setVersion(version)
    .addCol("a", ColumnType.INT_TYPE_NAME)
    .addCol("b", ColumnType.FLOAT_TYPE_NAME)
    .setCreatedAt(creationTime)
    .setState(SchemaVersionState.INITIATED)
    .setDescription(description)
    .setSchemaText(schemaText)
    .setFingerprint(fingerprint)
    .setName(versionName)
    .setSerdeName(serdeName)
    .setSerdeSerializerClass(serializer)
    .setSerdeDeserializerClass(deserializer)
    .setSerdeDescription(serdeDescription)
    .build();
client.addSchemaVersion(schemaVersion);
schemaVersion = client.getSchemaVersion(dbName, schemaName, version);
Assert.assertNotNull(schemaVersion);
Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName());
Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName());
Assert.assertEquals(version, schemaVersion.getVersion());
Assert.assertEquals(creationTime, schemaVersion.getCreatedAt());
Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState());
Assert.assertEquals(description, schemaVersion.getDescription());
Assert.assertEquals(schemaText, schemaVersion.getSchemaText());
Assert.assertEquals(fingerprint, schemaVersion.getFingerprint());
Assert.assertEquals(versionName, schemaVersion.getName());
Assert.assertEquals(serdeName, schemaVersion.getSerDe().getName());
Assert.assertEquals(serializer, schemaVersion.getSerDe().getSerializerClass());
Assert.assertEquals(deserializer, schemaVersion.getSerDe().getDeserializerClass());
Assert.assertEquals(serdeDescription, schemaVersion.getSerDe().getDescription());
Assert.assertEquals(2, schemaVersion.getColsSize());
List<FieldSchema> cols = schemaVersion.getCols();
Collections.sort(cols);
Assert.assertEquals("a", cols.get(0).getName());
Assert.assertEquals(ColumnType.INT_TYPE_NAME, cols.get(0).getType());
Assert.assertEquals("b", cols.get(1).getName());
Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType());
Assert.assertEquals(1, (int) preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION));
client.dropSchemaVersion(dbName, schemaName, version);
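// after the drop, looking the same version up again should fail with NoSuchObjectException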
try {
client.getSchemaVersion(dbName, schemaName, version);
Assert.fail();
} catch (NoSuchObjectException e) {
// all good
}
}
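The test above depends on a uniqueSchemaName() helper that lives elsewhere in TestHiveMetaStoreSchemaMethods and is not part of this snippet. A minimal sketch of what such a helper could look like, assuming a simple counter-based naming scheme (the field and naming pattern below are illustrative assumptions, not the project's actual implementation):

private static int schemaCounter = 0;

private static String uniqueSchemaName() {
  // hypothetical: return a fresh schema name on every call so tests do not collide
  return "test_schema_" + schemaCounter++;
}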
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
From the class TestHiveMetaStoreSchemaMethods, method iSchema:
@Test
public void iSchema() throws TException {
String schemaName = uniqueSchemaName();
String schemaGroup = "group1";
String description = "This is a description";
ISchema schema = new ISchemaBuilder()
    .setSchemaType(SchemaType.AVRO)
    .setName(schemaName)
    .setCompatibility(SchemaCompatibility.FORWARD)
    .setValidationLevel(SchemaValidation.LATEST)
    .setCanEvolve(false)
    .setSchemaGroup(schemaGroup)
    .setDescription(description)
    .build();
client.createISchema(schema);
Assert.assertEquals(1, (int) preEvents.get(PreEventContext.PreEventType.CREATE_ISCHEMA));
Assert.assertEquals(1, (int) events.get(EventMessage.EventType.CREATE_ISCHEMA));
Assert.assertEquals(1, (int) transactionalEvents.get(EventMessage.EventType.CREATE_ISCHEMA));
schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName);
Assert.assertEquals(1, (int) preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA));
Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
Assert.assertEquals(schemaName, schema.getName());
Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility());
Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel());
Assert.assertFalse(schema.isCanEvolve());
Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
Assert.assertEquals(description, schema.getDescription());
schemaGroup = "new group";
description = "new description";
schema.setCompatibility(SchemaCompatibility.BOTH);
schema.setValidationLevel(SchemaValidation.ALL);
schema.setCanEvolve(true);
schema.setSchemaGroup(schemaGroup);
schema.setDescription(description);
client.alterISchema(DEFAULT_DATABASE_NAME, schemaName, schema);
Assert.assertEquals(1, (int) preEvents.get(PreEventContext.PreEventType.ALTER_ISCHEMA));
Assert.assertEquals(1, (int) events.get(EventMessage.EventType.ALTER_ISCHEMA));
Assert.assertEquals(1, (int) transactionalEvents.get(EventMessage.EventType.ALTER_ISCHEMA));
schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName);
Assert.assertEquals(2, (int) preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA));
Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
Assert.assertEquals(schemaName, schema.getName());
Assert.assertEquals(SchemaCompatibility.BOTH, schema.getCompatibility());
Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel());
Assert.assertTrue(schema.isCanEvolve());
Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
Assert.assertEquals(description, schema.getDescription());
client.dropISchema(DEFAULT_DATABASE_NAME, schemaName);
Assert.assertEquals(1, (int) preEvents.get(PreEventContext.PreEventType.DROP_ISCHEMA));
Assert.assertEquals(1, (int) events.get(EventMessage.EventType.DROP_ISCHEMA));
Assert.assertEquals(1, (int) transactionalEvents.get(EventMessage.EventType.DROP_ISCHEMA));
try {
client.getISchema(DEFAULT_DATABASE_NAME, schemaName);
Assert.fail();
} catch (NoSuchObjectException e) {
// all good
}
}
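The counts asserted above come from preEvents, events, and transactionalEvents maps that the test class fills through metastore listeners registered in setup code not shown here. A minimal sketch of a counting pre-event listener, assuming the standard MetaStorePreEventListener API; the class name and map wiring are illustrative assumptions, not the test's actual listener:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.events.PreEventContext;

public class CountingPreListener extends MetaStorePreEventListener {

  // hypothetical shared counter map, keyed by pre-event type
  static final Map<PreEventContext.PreEventType, Integer> preEvents = new HashMap<>();

  public CountingPreListener(Configuration config) {
    super(config);
  }

  @Override
  public void onEvent(PreEventContext context)
      throws MetaException, NoSuchObjectException, InvalidOperationException {
    // count every pre-event; a pre-listener may also reject an operation by throwing,
    // for example NoSuchObjectException when a referenced object is missing
    preEvents.merge(context.getEventType(), 1, Integer::sum);
  }
}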
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
From the class TestHiveMetaStoreSchemaMethods, method iSchemaOtherDatabase:
@Test
public void iSchemaOtherDatabase() throws TException {
String dbName = "other_db";
Database db = new DatabaseBuilder().setName(dbName).build();
client.createDatabase(db);
String schemaName = uniqueSchemaName();
String schemaGroup = "group1";
String description = "This is a description";
ISchema schema = new ISchemaBuilder()
    .setSchemaType(SchemaType.AVRO)
    .setName(schemaName)
    .setDbName(dbName)
    .setCompatibility(SchemaCompatibility.FORWARD)
    .setValidationLevel(SchemaValidation.LATEST)
    .setCanEvolve(false)
    .setSchemaGroup(schemaGroup)
    .setDescription(description)
    .build();
client.createISchema(schema);
schema = client.getISchema(dbName, schemaName);
Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
Assert.assertEquals(schemaName, schema.getName());
Assert.assertEquals(dbName, schema.getDbName());
Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility());
Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel());
Assert.assertFalse(schema.isCanEvolve());
Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
Assert.assertEquals(description, schema.getDescription());
schemaGroup = "new group";
description = "new description";
schema.setCompatibility(SchemaCompatibility.BOTH);
schema.setValidationLevel(SchemaValidation.ALL);
schema.setCanEvolve(true);
schema.setSchemaGroup(schemaGroup);
schema.setDescription(description);
client.alterISchema(dbName, schemaName, schema);
schema = client.getISchema(dbName, schemaName);
Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
Assert.assertEquals(schemaName, schema.getName());
Assert.assertEquals(dbName, schema.getDbName());
Assert.assertEquals(SchemaCompatibility.BOTH, schema.getCompatibility());
Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel());
Assert.assertTrue(schema.isCanEvolve());
Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
Assert.assertEquals(description, schema.getDescription());
client.dropISchema(dbName, schemaName);
try {
client.getISchema(dbName, schemaName);
Assert.fail();
} catch (NoSuchObjectException e) {
// all good
}
}
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
From the class HiveAlterHandler, method alterPartition:
@Override
public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
    final String name, final List<String> part_vals, final Partition new_part,
    EnvironmentContext environmentContext, IHMSHandler handler)
    throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
boolean success = false;
Partition oldPart;
List<TransactionalMetaStoreEventListener> transactionalListeners = null;
if (handler != null) {
transactionalListeners = handler.getTransactionalListeners();
}
// Set DDL time to now if not specified
if (new_part.getParameters() == null || new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null
    || Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
}
// alter partition
if (part_vals == null || part_vals.size() == 0) {
try {
msdb.openTransaction();
Table tbl = msdb.getTable(dbname, name);
if (tbl == null) {
throw new InvalidObjectException("Unable to alter partition because table or database does not exist.");
}
oldPart = msdb.getPartition(dbname, name, new_part.getValues());
if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
// if stats are same, no need to update
if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) {
MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters());
} else {
MetaStoreUtils.updatePartitionStatsFast(new_part, tbl, wh, false, true, environmentContext, false);
}
}
// PartitionView does not have SD. We do not need update its column stats
if (oldPart.getSd() != null) {
updateOrGetPartitionColumnStats(msdb, dbname, name, new_part.getValues(), oldPart.getSd().getCols(), tbl, new_part, null);
}
msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION,
    new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler), environmentContext);
}
success = msdb.commitTransaction();
} catch (InvalidObjectException e) {
throw new InvalidOperationException("alter is not possible");
} catch (NoSuchObjectException e) {
// old partition does not exist
throw new InvalidOperationException("alter is not possible");
} finally {
if (!success) {
msdb.rollbackTransaction();
}
}
return oldPart;
}
// rename partition
String oldPartLoc;
String newPartLoc;
Path srcPath = null;
Path destPath = null;
FileSystem srcFs;
FileSystem destFs = null;
boolean dataWasMoved = false;
try {
msdb.openTransaction();
Table tbl = msdb.getTable(dbname, name);
if (tbl == null) {
throw new InvalidObjectException("Unable to alter partition because table or database does not exist.");
}
try {
oldPart = msdb.getPartition(dbname, name, part_vals);
} catch (NoSuchObjectException e) {
// this means there is no existing partition
throw new InvalidObjectException("Unable to rename partition because old partition does not exist");
}
Partition check_part;
try {
check_part = msdb.getPartition(dbname, name, new_part.getValues());
} catch (NoSuchObjectException e) {
// this means there is no existing partition
check_part = null;
}
if (check_part != null) {
throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." + new_part.getValues());
}
// 3) rename the partition directory if it is not an external table
if (!tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
try {
// if tbl location is available use it
// else derive the tbl location from database location
destPath = wh.getPartitionPath(msdb.getDatabase(dbname), tbl, new_part.getValues());
destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
} catch (NoSuchObjectException e) {
LOG.debug("Didn't find object in metastore ", e);
throw new InvalidOperationException("Unable to change partition or table. Database " + dbname + " does not exist" + " Check metastore logs for detailed stack." + e.getMessage());
}
if (destPath != null) {
newPartLoc = destPath.toString();
oldPartLoc = oldPart.getSd().getLocation();
LOG.info("srcPath:" + oldPartLoc);
LOG.info("descPath:" + newPartLoc);
srcPath = new Path(oldPartLoc);
srcFs = wh.getFs(srcPath);
destFs = wh.getFs(destPath);
// check that src and dest are on the same file system
if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
throw new InvalidOperationException("New table location " + destPath + " is on a different file system than the old location " + srcPath + ". This operation is not supported.");
}
try {
if (srcFs.exists(srcPath)) {
if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
throw new InvalidOperationException("New location for this table " + tbl.getDbName() + "." + tbl.getTableName() + " already exists : " + destPath);
}
// if destPath's parent path doesn't exist, we should mkdir it
Path destParentPath = destPath.getParent();
if (!wh.mkdirs(destParentPath)) {
throw new MetaException("Unable to create path " + destParentPath);
}
// rename the data directory
wh.renameDir(srcPath, destPath, true);
LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
dataWasMoved = true;
}
} catch (IOException e) {
LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, e);
throw new InvalidOperationException("Unable to access src or dest location for partition " + tbl.getDbName() + "." + tbl.getTableName() + " " + new_part.getValues());
} catch (MetaException me) {
LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, me);
throw me;
}
new_part.getSd().setLocation(newPartLoc);
}
} else {
new_part.getSd().setLocation(oldPart.getSd().getLocation());
}
if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
MetaStoreUtils.updatePartitionStatsFast(new_part, tbl, wh, false, true, environmentContext, false);
}
String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null);
msdb.alterPartition(dbname, name, part_vals, new_part);
if (cs != null) {
cs.getStatsDesc().setPartName(newPartName);
try {
msdb.updatePartitionColumnStatistics(cs, new_part.getValues());
} catch (InvalidInputException iie) {
throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
} catch (NoSuchObjectException nsoe) {
// It is ok, ignore
}
}
if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION,
    new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler), environmentContext);
}
success = msdb.commitTransaction();
} finally {
if (!success) {
LOG.error("Failed to rename a partition. Rollback transaction");
msdb.rollbackTransaction();
if (dataWasMoved) {
LOG.error("Revert the data move in renaming a partition.");
try {
if (destFs.exists(destPath)) {
wh.renameDir(destPath, srcPath, false);
}
} catch (MetaException me) {
LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath + " in alter partition failure. Manual restore is needed.");
} catch (IOException ioe) {
LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath + " in alter partition failure. Manual restore is needed.");
}
}
}
}
return oldPart;
}
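From the client side, this handler is typically reached through IMetaStoreClient.alter_partition. A hedged sketch of the in-place alter branch (no separate part_vals), assuming an already connected client and an existing partitioned table; the database, table, and partition values are placeholders, not taken from the project:

// placeholder names for illustration only
Partition part = client.getPartition("web_logs_db", "page_views", Arrays.asList("2018-01-01"));
part.putToParameters("comment", "reprocessed");
try {
  // no separate part_vals are supplied, so the handler presumably follows the in-place alter path
  client.alter_partition("web_logs_db", "page_views", part);
} catch (InvalidOperationException e) {
  // a missing partition or table surfaces here as InvalidOperationException;
  // the underlying NoSuchObjectException is caught inside alterPartition
}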
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
From the class HiveAlterHandler, method alterPartitions:
@Override
public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
    final String name, final List<Partition> new_parts, EnvironmentContext environmentContext,
    IHMSHandler handler)
    throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
List<Partition> oldParts = new ArrayList<>();
List<List<String>> partValsList = new ArrayList<>();
List<TransactionalMetaStoreEventListener> transactionalListeners = null;
if (handler != null) {
transactionalListeners = handler.getTransactionalListeners();
}
boolean success = false;
try {
msdb.openTransaction();
Table tbl = msdb.getTable(dbname, name);
if (tbl == null) {
throw new InvalidObjectException("Unable to alter partitions because table or database does not exist.");
}
for (Partition tmpPart : new_parts) {
// Set DDL time to now if not specified
if (tmpPart.getParameters() == null || tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null
    || Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
}
Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues());
oldParts.add(oldTmpPart);
partValsList.add(tmpPart.getValues());
if (MetaStoreUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) {
// Check if stats are same, no need to update
if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
} else {
MetaStoreUtils.updatePartitionStatsFast(tmpPart, tbl, wh, false, true, environmentContext, false);
}
}
// PartitionView does not have SD and we do not need to update its column stats
if (oldTmpPart.getSd() != null) {
updateOrGetPartitionColumnStats(msdb, dbname, name, oldTmpPart.getValues(), oldTmpPart.getSd().getCols(), tbl, tmpPart, null);
}
}
msdb.alterPartitions(dbname, name, partValsList, new_parts);
Iterator<Partition> oldPartsIt = oldParts.iterator();
for (Partition newPart : new_parts) {
Partition oldPart;
if (oldPartsIt.hasNext()) {
oldPart = oldPartsIt.next();
} else {
throw new InvalidOperationException("Missing old partition corresponding to new partition " + "when invoking MetaStoreEventListener for alterPartitions event.");
}
if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION,
    new AlterPartitionEvent(oldPart, newPart, tbl, false, true, handler));
}
}
success = msdb.commitTransaction();
} catch (InvalidObjectException | NoSuchObjectException e) {
throw new InvalidOperationException("Alter partition operation failed: " + e);
} finally {
if (!success) {
msdb.rollbackTransaction();
}
}
return oldParts;
}
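A hedged sketch of the corresponding client-side batch call, assuming a connected IMetaStoreClient; the names and the parameter update are placeholders for illustration, not taken from the project:

// placeholder names for illustration only
List<Partition> parts = client.listPartitions("web_logs_db", "page_views", (short) -1);
for (Partition p : parts) {
  p.putToParameters("compacted", "true");
}
// the partitions are altered in a single metastore transaction; if one of them no longer
// exists, the NoSuchObjectException is rethrown as InvalidOperationException, as shown above
client.alter_partitions("web_logs_db", "page_views", parts);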