Example 21 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From class HiveAlterHandler, method alterPartitions.

@Override
public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName, final String dbname, final String name, final List<Partition> new_parts, EnvironmentContext environmentContext, String writeIdList, long writeId, IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
    List<Partition> oldParts = new ArrayList<>();
    List<List<String>> partValsList = new ArrayList<>();
    List<TransactionalMetaStoreEventListener> transactionalListeners = null;
    if (handler != null) {
        transactionalListeners = handler.getTransactionalListeners();
    }
    boolean success = false;
    try {
        msdb.openTransaction();
        // Note: should we pass in write ID here? We only update stats on parts so probably not.
        Table tbl = msdb.getTable(catName, dbname, name, null);
        if (tbl == null) {
            throw new InvalidObjectException("Unable to alter partitions because table or database does not exist.");
        }
        blockPartitionLocationChangesOnReplSource(msdb.getDatabase(catName, dbname), tbl, environmentContext);
        Map<List<String>, Partition> oldPartMap = getExistingPartitions(msdb, new_parts, tbl, catName, dbname, name);
        for (Partition tmpPart : new_parts) {
            // Set DDL time to now if not specified
            if (tmpPart.getParameters() == null || tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null || Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
                tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System.currentTimeMillis() / 1000));
            }
            Partition oldTmpPart = oldPartMap.get(tmpPart.getValues());
            oldParts.add(oldTmpPart);
            partValsList.add(tmpPart.getValues());
            if (MetaStoreServerUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) {
                // Check if stats are same, no need to update
                if (MetaStoreServerUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
                    MetaStoreServerUtils.updateBasicState(environmentContext, tmpPart.getParameters());
                } else {
                    MetaStoreServerUtils.updatePartitionStatsFast(tmpPart, tbl, wh, false, true, environmentContext, false);
                }
            }
            // PartitionView does not have SD and we do not need to update its column stats
            if (oldTmpPart.getSd() != null) {
                updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldTmpPart.getValues(), oldTmpPart.getSd().getCols(), tbl, tmpPart, null, null);
            }
        }
        msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, writeIdList);
        Iterator<Partition> oldPartsIt = oldParts.iterator();
        for (Partition newPart : new_parts) {
            Partition oldPart;
            if (oldPartsIt.hasNext()) {
                oldPart = oldPartsIt.next();
            } else {
                throw new InvalidOperationException("Missing old partition corresponding to new partition " + "when invoking MetaStoreEventListener for alterPartitions event.");
            }
            if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
                MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, new AlterPartitionEvent(oldPart, newPart, tbl, false, true, newPart.getWriteId(), handler), environmentContext);
            }
        }
        success = msdb.commitTransaction();
    } catch (InvalidObjectException | NoSuchObjectException e) {
        throw new InvalidOperationException("Alter partition operation failed: " + e);
    } finally {
        if (!success) {
            msdb.rollbackTransaction();
        }
    }
    return oldParts;
}
Also used: Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) AlterPartitionEvent(org.apache.hadoop.hive.metastore.events.AlterPartitionEvent) ArrayList(java.util.ArrayList) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) List(java.util.List) LinkedList(java.util.LinkedList) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException)
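
The method above follows the metastore's standard transaction idiom: all mutations happen between msdb.openTransaction() and msdb.commitTransaction(), and the finally block rolls back whenever the success flag was never set. Below is a minimal, self-contained sketch of that idiom; the Store interface here is a hypothetical stand-in for the RawStore surface, not the Hive API.

import java.util.List;

// Hypothetical stand-in for the RawStore transaction surface (not the Hive API).
interface Store {
    void openTransaction();
    boolean commitTransaction();
    void rollbackTransaction();
    void alterPartitions(List<String> partVals);
}

class AlterIdiom {
    // Success-flag pattern: roll back in finally unless the commit succeeded.
    static void alterSafely(Store store, List<String> partVals) {
        boolean success = false;
        try {
            store.openTransaction();
            store.alterPartitions(partVals);
            success = store.commitTransaction();
        } finally {
            if (!success) {
                store.rollbackTransaction();
            }
        }
    }
}

Note that exceptions propagate out of the try block untouched; the finally clause only guarantees rollback-on-failure, exactly as in alterPartitions above.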

Example 22 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From class TestHiveMetaStore, method testSimpleTable.

@Test
public void testSimpleTable() throws Exception {
    try {
        String dbName = "simpdb";
        String tblName = "simptbl";
        String tblName2 = "simptbl2";
        String typeName = "Person";
        client.dropTable(dbName, tblName);
        silentDropDatabase(dbName);
        new DatabaseBuilder().setName(dbName).create(client, conf);
        client.dropType(typeName);
        Type typ1 = new Type();
        typ1.setName(typeName);
        typ1.setFields(new ArrayList<>(2));
        typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
        typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
        client.createType(typ1);
        Table tbl = new TableBuilder().setDbName(dbName).setTableName(tblName).setCols(typ1.getFields()).setNumBuckets(1).addBucketCol("name").addStorageDescriptorParam("test_param_1", "Use this for comments etc").create(client, conf);
        if (isThriftClient) {
            // the createTable() above does not update the location in the 'tbl'
            // object when the client is a thrift client and the code below relies
            // on the location being present in the 'tbl' object - so get the table
            // from the metastore
            tbl = client.getTable(dbName, tblName);
        }
        Table tbl2 = client.getTable(dbName, tblName);
        assertNotNull(tbl2);
        Assert.assertTrue(tbl2.isSetId());
        assertEquals(tbl2.getDbName(), dbName);
        assertEquals(tbl2.getTableName(), tblName);
        assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
        assertEquals(tbl2.getSd().isCompressed(), false);
        assertEquals(tbl2.getSd().getNumBuckets(), 1);
        assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
        assertNotNull(tbl2.getSd().getSerdeInfo());
        tbl.getSd().getSerdeInfo().setParameters(new HashMap<>());
        tbl.getSd().getSerdeInfo().getParameters().put(ColumnType.SERIALIZATION_FORMAT, "1");
        tbl2.setTableName(tblName2);
        tbl2.setParameters(new HashMap<>());
        tbl2.getParameters().put("EXTERNAL", "TRUE");
        tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
        List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
        assertNotNull(fieldSchemas);
        assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
        for (FieldSchema fs : tbl.getSd().getCols()) {
            assertTrue(fieldSchemas.contains(fs));
        }
        List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
        assertNotNull(fieldSchemasFull);
        assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size() + tbl.getPartitionKeys().size());
        for (FieldSchema fs : tbl.getSd().getCols()) {
            assertTrue(fieldSchemasFull.contains(fs));
        }
        for (FieldSchema fs : tbl.getPartitionKeys()) {
            assertTrue(fieldSchemasFull.contains(fs));
        }
        tbl2.unsetId();
        client.createTable(tbl2);
        if (isThriftClient) {
            tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
        }
        Table tbl3 = client.getTable(dbName, tblName2);
        assertNotNull(tbl3);
        assertEquals(tbl3.getDbName(), dbName);
        assertEquals(tbl3.getTableName(), tblName2);
        assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
        assertEquals(tbl3.getSd().isCompressed(), false);
        assertEquals(tbl3.getSd().getNumBuckets(), 1);
        assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
        assertEquals(tbl3.getParameters(), tbl2.getParameters());
        fieldSchemas = client.getFields(dbName, tblName2);
        assertNotNull(fieldSchemas);
        assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
        for (FieldSchema fs : tbl2.getSd().getCols()) {
            assertTrue(fieldSchemas.contains(fs));
        }
        fieldSchemasFull = client.getSchema(dbName, tblName2);
        assertNotNull(fieldSchemasFull);
        assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size() + tbl2.getPartitionKeys().size());
        for (FieldSchema fs : tbl2.getSd().getCols()) {
            assertTrue(fieldSchemasFull.contains(fs));
        }
        for (FieldSchema fs : tbl2.getPartitionKeys()) {
            assertTrue(fieldSchemasFull.contains(fs));
        }
        assertEquals("Use this for comments etc", tbl2.getSd().getParameters().get("test_param_1"));
        assertEquals("name", tbl2.getSd().getBucketCols().get(0));
        assertTrue("Partition key list is not empty", (tbl2.getPartitionKeys() == null) || (tbl2.getPartitionKeys().size() == 0));
        // test get_table_objects_by_name functionality
        ArrayList<String> tableNames = new ArrayList<>();
        tableNames.add(tblName2);
        tableNames.add(tblName);
        tableNames.add(tblName2);
        List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
        assertEquals(2, foundTables.size());
        for (Table t : foundTables) {
            if (t.getTableName().equals(tblName2)) {
                assertEquals(t.getSd().getLocation(), tbl2.getSd().getLocation());
            } else {
                assertEquals(t.getTableName(), tblName);
                assertEquals(t.getSd().getLocation(), tbl.getSd().getLocation());
            }
            assertEquals(t.getSd().getCols().size(), typ1.getFields().size());
            assertEquals(t.getSd().isCompressed(), false);
            assertEquals(foundTables.get(0).getSd().getNumBuckets(), 1);
            assertNotNull(t.getSd().getSerdeInfo());
            assertEquals(t.getDbName(), dbName);
        }
        tableNames.add(1, "table_that_doesnt_exist");
        foundTables = client.getTableObjectsByName(dbName, tableNames);
        assertEquals(foundTables.size(), 2);
        InvalidOperationException ioe = null;
        try {
            foundTables = client.getTableObjectsByName(dbName, null);
        } catch (InvalidOperationException e) {
            ioe = e;
        }
        assertNotNull(ioe);
        assertTrue("Table not found", ioe.getMessage().contains("null tables"));
        UnknownDBException udbe = null;
        try {
            foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
        } catch (UnknownDBException e) {
            udbe = e;
        }
        assertNotNull(udbe);
        assertTrue("DB not found", udbe.getMessage().contains("not find database hive.db_that_doesnt_exist"));
        udbe = null;
        try {
            foundTables = client.getTableObjectsByName("", tableNames);
        } catch (UnknownDBException e) {
            udbe = e;
        }
        assertNotNull(udbe);
        assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));
        FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
        client.dropTable(dbName, tblName);
        assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
        client.dropTable(dbName, tblName2);
        assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
        client.dropType(typeName);
        client.dropDatabase(dbName);
    } catch (Exception e) {
        System.err.println(StringUtils.stringifyException(e));
        System.err.println("testSimpleTable() failed.");
        throw e;
    }
}
Also used: Path(org.apache.hadoop.fs.Path) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.metastore.api.Table) UnknownDBException(org.apache.hadoop.hive.metastore.api.UnknownDBException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) ConfigValSecurityException(org.apache.hadoop.hive.metastore.api.ConfigValSecurityException) SQLException(java.sql.SQLException) TException(org.apache.thrift.TException) IOException(java.io.IOException) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Type(org.apache.hadoop.hive.metastore.api.Type) DatabaseType(org.apache.hadoop.hive.metastore.api.DatabaseType) ResourceType(org.apache.hadoop.hive.metastore.api.ResourceType) FunctionType(org.apache.hadoop.hive.metastore.api.FunctionType) PrincipalType(org.apache.hadoop.hive.metastore.api.PrincipalType) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
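
The negative-path checks in this test use the capture-and-assert pattern: invoke the call, catch only the expected exception type into a local variable, and assert on the captured instance after the try/catch. A standalone sketch of the same pattern follows; the thrower() helper is hypothetical and merely stands in for a failing metastore call.

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import org.junit.Test;

public class ExpectedExceptionPatternTest {

    // Hypothetical helper standing in for a metastore call that rejects its input.
    private static void thrower() {
        throw new IllegalArgumentException("null tables");
    }

    @Test
    public void capturesExpectedException() {
        IllegalArgumentException captured = null;
        try {
            thrower();
        } catch (IllegalArgumentException e) {
            captured = e;
        }
        // Assert after the try/catch, as the metastore test above does.
        assertNotNull(captured);
        assertTrue(captured.getMessage().contains("null tables"));
    }
}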

Example 23 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From class TestExceptionHandler, method testThrowIfInstance.

@Test
public void testThrowIfInstance() {
    MetaException me = new MetaException("MetaException test");
    try {
        handleException(me).throwIfInstance(RuntimeException.class);
    } catch (Exception e) {
        fail("Exception should not happen:" + e.getMessage());
    }
    try {
        handleException(me).throwIfInstance(MetaException.class);
        fail("Should throw a exception here");
    } catch (Exception e) {
        assertTrue(e == me);
    }
    InvalidOperationException ioe = new InvalidOperationException("InvalidOperationException test");
    try {
        handleException(ioe).throwIfInstance(MetaException.class, InvalidOperationException.class);
        fail("Should throw a exception here");
    } catch (Exception e) {
        assertTrue(e == ioe);
    }
    TException te = new TException("TException");
    try {
        handleException(te).throwIfInstance(MetaException.class, InvalidOperationException.class).throwIfInstance(TException.class).defaultMetaException();
        fail("Should throw an exception here");
    } catch (Exception e) {
        assertTrue(e == te);
    }
    RuntimeException re = new RuntimeException("RuntimeException test");
    try {
        Exception e = handleException(re).throwIfInstance(MetaException.class, InvalidOperationException.class).throwIfInstance(TException.class).defaultRuntimeException();
        assertTrue(e == re);
    } catch (Exception e) {
        fail("Exception should not happen:" + e.getMessage());
    }
    NullPointerException npe = new NullPointerException();
    try {
        Exception e = handleException(npe).throwIfInstance(MetaException.class, InvalidOperationException.class).throwIfInstance(TException.class).defaultMetaException();
        assertTrue(e instanceof MetaException);
        assertTrue(e.getMessage().equals(npe.toString()));
    } catch (Exception e) {
        fail("Exception should not happen:" + e.getMessage());
    }
    try {
        handleException(me).throwIfInstance(MetaException.class, InvalidOperationException.class).throwIfInstance(TException.class).defaultMetaException();
        fail("Should throw a exception here");
    } catch (Exception e) {
        assertTrue(e == me);
    }
}
Also used: TException(org.apache.thrift.TException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) ExceptionHandler.handleException(org.apache.hadoop.hive.metastore.ExceptionHandler.handleException) IOException(java.io.IOException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) Test(org.junit.Test)
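
The chain under test short-circuits: each throwIfInstance call rethrows the wrapped exception as soon as one of the given types matches, and the trailing default*Exception call converts whatever remains unmatched. A simplified, hypothetical reconstruction of that contract is sketched below; the real org.apache.hadoop.hive.metastore.ExceptionHandler may differ in detail.

// Hypothetical reconstruction of the fluent contract exercised above,
// not the Hive ExceptionHandler source.
class Handler {
    private final Exception e;

    private Handler(Exception e) {
        this.e = e;
    }

    static Handler handle(Exception e) {
        return new Handler(e);
    }

    // Rethrow immediately if the wrapped exception matches any of the given types.
    @SafeVarargs
    final Handler throwIfInstance(Class<? extends Exception>... types) throws Exception {
        for (Class<? extends Exception> t : types) {
            if (t.isInstance(e)) {
                throw e;
            }
        }
        return this;
    }

    // Fall-through default: hand back an unchecked exception for the caller.
    RuntimeException defaultRuntimeException() {
        return e instanceof RuntimeException ? (RuntimeException) e : new RuntimeException(e.toString());
    }
}

Call sites then read like the chains in the test, e.g. Handler.handle(ex).throwIfInstance(IllegalStateException.class).defaultRuntimeException().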

Example 24 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From class ObjectStore, method getResourcePlan.

@Override
public WMFullResourcePlan getResourcePlan(String name, String ns) throws NoSuchObjectException {
    boolean commited = false;
    try {
        openTransaction();
        WMFullResourcePlan fullRp = fullFromMResourcePlan(getMWMResourcePlan(name, ns, false));
        commited = commitTransaction();
        return fullRp;
    } catch (InvalidOperationException e) {
        // Should not happen, edit check is false.
        throw new RuntimeException(e);
    } finally {
        rollbackAndCleanup(commited, (Query) null);
    }
}
Also used: WMFullResourcePlan(org.apache.hadoop.hive.metastore.api.WMFullResourcePlan) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException)
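
getResourcePlan wraps an InvalidOperationException that cannot occur on this code path (the edit check passed to getMWMResourcePlan is false) in a RuntimeException instead of widening the method's throws clause. A generic sketch of the same idiom using a JDK API:

import java.io.UnsupportedEncodingException;

class ImpossibleCheckedException {
    // "UTF-8" is guaranteed to exist on every JVM, so the checked exception
    // is unreachable; wrap it rather than polluting the method signature.
    static byte[] utf8(String s) {
        try {
            return s.getBytes("UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new RuntimeException(e); // should not happen
        }
    }
}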

Example 25 with InvalidOperationException

Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.

From class ObjectStore, method switchStatus.

private WMFullResourcePlan switchStatus(String name, MWMResourcePlan mResourcePlan, String status, boolean canActivateDisabled, boolean canDeactivate) throws InvalidOperationException {
    Status currentStatus = mResourcePlan.getStatus();
    Status newStatus = null;
    try {
        newStatus = Status.valueOf(status);
    } catch (IllegalArgumentException e) {
        throw new InvalidOperationException("Invalid status: " + status);
    }
    if (newStatus == currentStatus) {
        return null;
    }
    boolean doActivate = false, doValidate = false;
    switch (currentStatus) {
        case ACTIVE:
            // No status change for an active resource plan; activate another plan first.
            if (!canDeactivate) {
                throw new InvalidOperationException("Resource plan " + name + " is active; activate another plan first, or disable workload management.");
            }
            break;
        case DISABLED:
            assert newStatus == Status.ACTIVE || newStatus == Status.ENABLED;
            doValidate = true;
            doActivate = (newStatus == Status.ACTIVE);
            if (doActivate && !canActivateDisabled) {
                throw new InvalidOperationException("Resource plan " + name + " is disabled and should be enabled before activation (or in the same command)");
            }
            break;
        case ENABLED:
            if (newStatus == Status.DISABLED) {
                mResourcePlan.setStatus(newStatus);
                // A simple case.
                return null;
            }
            assert newStatus == Status.ACTIVE;
            doActivate = true;
            break;
        default:
            throw new AssertionError("Unexpected status " + currentStatus);
    }
    if (doValidate) {
        // Note: this may use additional inputs from the caller, e.g. maximum query
        // parallelism in the cluster based on physical constraints.
        WMValidateResourcePlanResponse response = getResourcePlanErrors(mResourcePlan);
        if (!response.getErrors().isEmpty()) {
            throw new InvalidOperationException("ResourcePlan: " + name + " is invalid: " + response.getErrors());
        }
    }
    if (doActivate) {
        // Deactivate currently active resource plan.
        deactivateActiveResourcePlan(mResourcePlan.getNs());
        mResourcePlan.setStatus(newStatus);
        return fullFromMResourcePlan(mResourcePlan);
    } else {
        mResourcePlan.setStatus(newStatus);
    }
    return null;
}
Also used: WMResourcePlanStatus(org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus) Status(org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) WMValidateResourcePlanResponse(org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse)
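
switchStatus is effectively a small state machine over the DISABLED, ENABLED, and ACTIVE statuses, with invalid transitions surfacing as InvalidOperationException. The sketch below is a hypothetical, simplified encoding of just the transition table it enforces; the real method additionally validates the plan and deactivates the currently active one.

// Hypothetical, simplified transition check mirroring switchStatus above.
enum PlanStatus { ACTIVE, ENABLED, DISABLED }

class PlanTransitions {
    static boolean isAllowed(PlanStatus from, PlanStatus to,
                             boolean canActivateDisabled, boolean canDeactivate) {
        if (from == to) {
            return true; // no-op, matching the early return above
        }
        switch (from) {
            case ACTIVE:
                // Leaving ACTIVE is only legal when deactivation is permitted.
                return canDeactivate;
            case DISABLED:
                // DISABLED may jump straight to ACTIVE only when explicitly allowed.
                return to == PlanStatus.ENABLED
                    || (to == PlanStatus.ACTIVE && canActivateDisabled);
            case ENABLED:
                return to == PlanStatus.DISABLED || to == PlanStatus.ACTIVE;
            default:
                return false;
        }
    }
}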

Aggregations

InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 51 usages
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 26 usages
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 23 usages
IOException (java.io.IOException): 19 usages
ArrayList (java.util.ArrayList): 18 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 17 usages
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 16 usages
TException (org.apache.thrift.TException): 15 usages
Partition (org.apache.hadoop.hive.metastore.api.Partition): 14 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 12 usages
Path (org.apache.hadoop.fs.Path): 12 usages
List (java.util.List): 10 usages
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 10 usages
InvalidInputException (org.apache.hadoop.hive.metastore.api.InvalidInputException): 10 usages
MWMResourcePlan (org.apache.hadoop.hive.metastore.model.MWMResourcePlan): 9 usages
SQLException (java.sql.SQLException): 8 usages
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 8 usages
Test (org.junit.Test): 8 usages
LinkedList (java.util.LinkedList): 7 usages
Database (org.apache.hadoop.hive.metastore.api.Database): 7 usages