
Example 76 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

From the class TestCommands, method testDropPartitionCommand:

@Test
public void testDropPartitionCommand() throws HCatException, MetaException {
    String dbName = "cmd_testdb";
    String tableName = "cmd_testtable";
    int evid = 789;
    List<HCatFieldSchema> pcols = HCatSchemaUtils.getHCatSchema("b:string").getFields();
    List<HCatFieldSchema> cols = HCatSchemaUtils.getHCatSchema("a:int").getFields();
    Map<String, String> ptnDesc = new HashMap<String, String>();
    ptnDesc.put("b", "test");
    Command testReplicatedDropPtnCmd = new DropPartitionCommand(dbName, tableName, ptnDesc, true, evid);
    assertEquals(evid, testReplicatedDropPtnCmd.getEventId());
    assertEquals(1, testReplicatedDropPtnCmd.get().size());
    assertEquals(true, testReplicatedDropPtnCmd.isRetriable());
    assertEquals(false, testReplicatedDropPtnCmd.isUndoable());
    CommandTestUtils.testCommandSerialization(testReplicatedDropPtnCmd);
    Command testNormalDropPtnCmd = new DropPartitionCommand(dbName, tableName, ptnDesc, false, evid);
    assertEquals(evid, testNormalDropPtnCmd.getEventId());
    assertEquals(1, testNormalDropPtnCmd.get().size());
    assertEquals(true, testNormalDropPtnCmd.isRetriable());
    assertEquals(false, testNormalDropPtnCmd.isUndoable());
    CommandTestUtils.testCommandSerialization(testNormalDropPtnCmd);
    client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
    client.createDatabase(HCatCreateDBDesc.create(dbName).ifNotExists(false).build());
    Map<String, String> props = new HashMap<String, String>();
    props.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid + 5));
    HCatTable table = (new HCatTable(dbName, tableName)).tblProps(props).cols(cols).partCols(pcols);
    client.createTable(HCatCreateTableDesc.create(table).build());
    HCatTable tableCreated = client.getTable(dbName, tableName);
    assertNotNull(tableCreated);
    HCatPartition ptnToAdd = (new HCatPartition(tableCreated, ptnDesc, TestHCatClient.makePartLocation(tableCreated, ptnDesc))).parameters(props);
    client.addPartition(HCatAddPartitionDesc.create(ptnToAdd).build());
    HCatPartition p1 = client.getPartition(dbName, tableName, ptnDesc);
    assertNotNull(p1);
    // Test replicated drop, should not drop, because evid < repl.state.id
    LOG.info("About to run :" + testReplicatedDropPtnCmd.get().get(0));
    driver.run(testReplicatedDropPtnCmd.get().get(0));
    HCatPartition p2 = client.getPartition(dbName, tableName, ptnDesc);
    assertNotNull(p2);
    // Test normal drop, should drop unconditionally.
    LOG.info("About to run :" + testNormalDropPtnCmd.get().get(0));
    driver.run(testNormalDropPtnCmd.get().get(0));
    Exception onfe = null;
    try {
        HCatPartition p_del = client.getPartition(dbName, tableName, ptnDesc);
    } catch (Exception e) {
        onfe = e;
    }
    assertNotNull(onfe);
    assertTrue(onfe instanceof ObjectNotFoundException);
    Map<String, String> props2 = new HashMap<String, String>();
    props2.put(ReplicationUtils.REPL_STATE_ID, String.valueOf(evid - 5));
    HCatPartition ptnToAdd2 = (new HCatPartition(tableCreated, ptnDesc, TestHCatClient.makePartLocation(tableCreated, ptnDesc))).parameters(props2);
    client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
    HCatPartition p3 = client.getPartition(dbName, tableName, ptnDesc);
    assertNotNull(p3);
    // Test replicated drop, should drop this time, since repl.state.id < evid.
    LOG.info("About to run :" + testReplicatedDropPtnCmd.get().get(0));
    driver.run(testReplicatedDropPtnCmd.get().get(0));
    Exception onfe2 = null;
    try {
        HCatPartition p_del = client.getPartition(dbName, tableName, ptnDesc);
    } catch (Exception e) {
        onfe2 = e;
    }
    assertNotNull(onfe2);
    assertTrue(onfe2 instanceof ObjectNotFoundException);
}
Also used: HashMap (java.util.HashMap), Command (org.apache.hive.hcatalog.api.repl.Command), ObjectNotFoundException (org.apache.hive.hcatalog.api.ObjectNotFoundException), HCatTable (org.apache.hive.hcatalog.api.HCatTable), HCatPartition (org.apache.hive.hcatalog.api.HCatPartition), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), HCatException (org.apache.hive.hcatalog.common.HCatException), IOException (java.io.IOException), HCatFieldSchema (org.apache.hive.hcatalog.data.schema.HCatFieldSchema), Test (org.junit.Test)
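
The condition the two replicated-drop runs above verify can be restated in isolation. The helper below is a hypothetical illustration, not part of the Hive API (assumes java.util.Map and org.apache.hive.hcatalog.api.repl.ReplicationUtils are imported); the real enforcement happens when the command returned by get() is executed through driver.run(), while this sketch only spells out the comparison between the event id and the ReplicationUtils.REPL_STATE_ID marker kept in the object's parameters.

// Hypothetical helper: apply a replicated event only when no replication-state marker is
// stored yet, or the stored state is older than the incoming event id. This mirrors the test
// above: evid < repl.state.id leaves the partition in place, repl.state.id < evid drops it.
static boolean shouldApplyReplicatedEvent(long eventId, Map<String, String> parameters) {
    String currentState = parameters.get(ReplicationUtils.REPL_STATE_ID);
    return currentState == null || Long.parseLong(currentState) < eventId;
}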

Example 77 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

From the class NotificationListener, method onCreateTable:

@Override
public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
    // as "HCAT_EVENT = HCAT_ADD_TABLE"
    if (tableEvent.getStatus()) {
        Table tbl = tableEvent.getTable();
        IHMSHandler handler = tableEvent.getIHMSHandler();
        Configuration conf = handler.getConf();
        Table newTbl;
        try {
            newTbl = handler.get_table_core(tbl.getDbName(), tbl.getTableName()).deepCopy();
            newTbl.getParameters().put(HCatConstants.HCAT_MSGBUS_TOPIC_NAME, getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "." + newTbl.getTableName().toLowerCase());
            handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl);
        } catch (TException e) {
            MetaException me = new MetaException(e.toString());
            me.initCause(e);
            throw me;
        }
        String topicName = getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase();
        send(messageFactory.buildCreateTableMessage(newTbl), topicName);
    }
}
Also used: TException (org.apache.thrift.TException), Table (org.apache.hadoop.hive.metastore.api.Table), Configuration (org.apache.hadoop.conf.Configuration), IHMSHandler (org.apache.hadoop.hive.metastore.IHMSHandler), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
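
For this callback to fire at all, the listener has to be registered with the metastore. A minimal sketch, assuming the standard hive.metastore.event.listeners configuration key and the org.apache.hive.hcatalog.listener.NotificationListener class name:

Configuration conf = MetastoreConf.newMetastoreConf();
// Register the listener on the metastore side; onCreateTable above is then invoked
// after every successful CREATE TABLE.
conf.set("hive.metastore.event.listeners",
        "org.apache.hive.hcatalog.listener.NotificationListener");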

Example 78 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

From the class TestObjectStore, method testDirectSqlErrorMetrics:

@Test
public void testDirectSqlErrorMetrics() throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
    Metrics.initialize(conf);
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " + "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter");
    // Call setUp() again so that we get an object store with the metrics initialized.
    setUp();
    Counter directSqlErrors = Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
    objectStore.new GetDbHelper("foo", true, true) {

        @Override
        protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
            return null;
        }

        @Override
        protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
            return null;
        }
    }.run(false);
    Assert.assertEquals(0, directSqlErrors.getCount());
    objectStore.new GetDbHelper("foo", true, true) {

        @Override
        protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
            throw new RuntimeException();
        }

        @Override
        protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
            return null;
        }
    }.run(false);
    Assert.assertEquals(1, directSqlErrors.getCount());
}
Also used: Counter (com.codahale.metrics.Counter), Configuration (org.apache.hadoop.conf.Configuration), Database (org.apache.hadoop.hive.metastore.api.Database), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest), Test (org.junit.Test)
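
The counter consulted by the assertions is a plain Codahale Counter, so the same check can be made outside a test. A minimal sketch, assuming metrics have been initialized as above and that LOG is any SLF4J-style logger:

Counter directSqlErrors = Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
// A non-zero count means at least one direct-SQL attempt (getSqlResult) failed and the
// ObjectStore fell back to the JDO path (getJdoResult), as the second helper above demonstrates.
if (directSqlErrors != null && directSqlErrors.getCount() > 0) {
    LOG.warn("Direct SQL errors so far: {}", directSqlErrors.getCount());
}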

Example 79 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

From the class TestAddPartitions, method testAddPartitionsOneInvalid:

@Test
public void testAddPartitionsOneInvalid() throws Exception {
    createTable();
    String tableLocation = metaStore.getWarehouseRoot() + "/" + TABLE_NAME;
    Partition partition1 = buildPartition(DB_NAME, TABLE_NAME, "2016", tableLocation + "/year=2016");
    Partition partition2 = buildPartition(DB_NAME, TABLE_NAME, "2017", tableLocation + "/year=2017");
    Partition partition3 = buildPartition(Lists.newArrayList("2015", "march"), getYearAndMonthPartCols(), 1);
    partition3.getSd().setLocation(tableLocation + "/year=2015/month=march");
    List<Partition> partitions = new ArrayList<>();
    partitions.add(partition1);
    partitions.add(partition2);
    partitions.add(partition3);
    try {
        client.add_partitions(partitions);
        Assert.fail("MetaException should have happened.");
    } catch (MetaException e) {
    // Expected exception
    }
    List<Partition> parts = client.listPartitions(DB_NAME, TABLE_NAME, MAX);
    Assert.assertNotNull(parts);
    Assert.assertTrue(parts.isEmpty());
    // TODO: This does not work correctly. None of the partitions is created, but the folder
    // for the first two is created. It is because in HiveMetaStore.add_partitions_core when
    // going through the partitions, the first two are already put and started in the thread
    // pool when the exception occurs in the third one. When the exception occurs, we go to
    // the finally part, but the map can be empty (it depends on the progress of the other
    // threads) so the folders won't be deleted.
    // Assert.assertFalse(metaStore.isPathExists(new Path(tableLocation + "/year=2016")));
    // Assert.assertFalse(metaStore.isPathExists(new Path(tableLocation + "/year=2017")));
    Assert.assertFalse(metaStore.isPathExists(new Path(tableLocation + "/year=2015/month=march")));
}
Also used: Path (org.apache.hadoop.fs.Path), Partition (org.apache.hadoop.hive.metastore.api.Partition), ArrayList (java.util.ArrayList), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), Test (org.junit.Test), MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
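
Given the partial-folder behaviour described in the TODO above, a caller can cheaply reject a malformed batch before it reaches the metastore. The check below is a hypothetical, client-side illustration (not Hive API) and assumes the single "year" partition column created by createTable() in this test:

// Hypothetical pre-check: refuse the whole batch if any partition does not carry exactly
// one partition value, so an invalid entry such as partition3 above cannot leave folders
// behind for the valid ones.
for (Partition partition : partitions) {
    if (partition.getValues() == null || partition.getValues().size() != 1) {
        throw new MetaException("Rejecting batch, unexpected partition values: " + partition.getValues());
    }
}
client.add_partitions(partitions);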

Example 80 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

From the class TestExchangePartitions, method testExchangePartitionsCustomPartLocation:

@Test
public void testExchangePartitionsCustomPartLocation() throws Exception {
    Table source = createTable(DB_NAME, "test_source_table", getYearMonthAndDayPartCols(), null);
    Table dest = createTable(DB_NAME, "test_dest_table", getYearMonthAndDayPartCols(), null);
    Partition[] parts = new Partition[2];
    parts[0] = createPartition(source, Lists.newArrayList("2019", "march", "15"), source.getSd().getLocation() + "/2019m15");
    parts[1] = createPartition(source, Lists.newArrayList("2019", "march", "22"), source.getSd().getLocation() + "/2019m22");
    Map<String, String> partitionSpecs = getPartitionSpec(parts[1]);
    try {
        client.exchange_partitions(partitionSpecs, source.getDbName(), source.getTableName(), dest.getDbName(), dest.getTableName());
        Assert.fail("MetaException should have been thrown.");
    } catch (MetaException e) {
    // Expected exception as FileNotFoundException will occur if the partitions have custom
    // location
    }
    checkRemainingPartitions(source, dest, Lists.newArrayList(parts[0], parts[1]));
    List<Partition> destTablePartitions = client.listPartitions(dest.getDbName(), dest.getTableName(), (short) -1);
    Assert.assertTrue(destTablePartitions.isEmpty());
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), Test (org.junit.Test), MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
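
The comment above attributes the failure to the custom partition locations, so the contrasting case is worth sketching. This is only an assumption-laden sketch reusing this test class's helpers (createPartition, getPartitionSpec); with partitions laid out under the default <table location>/year=.../month=.../day=... scheme, no FileNotFoundException, and therefore no MetaException, would be expected:

Partition defaultLocPart = createPartition(source, Lists.newArrayList("2019", "march", "15"),
        source.getSd().getLocation() + "/year=2019/month=march/day=15");
Map<String, String> spec = getPartitionSpec(defaultLocPart);
// With default locations the exchange is expected to move the partition from source to dest
// instead of throwing.
client.exchange_partitions(spec, source.getDbName(), source.getTableName(),
        dest.getDbName(), dest.getTableName());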

Aggregations

MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 318
IOException (java.io.IOException): 123
ArrayList (java.util.ArrayList): 95
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 74
TException (org.apache.thrift.TException): 67
Table (org.apache.hadoop.hive.metastore.api.Table): 59
Partition (org.apache.hadoop.hive.metastore.api.Partition): 57
SQLException (java.sql.SQLException): 55
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 53
Path (org.apache.hadoop.fs.Path): 45
Connection (java.sql.Connection): 36
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 34
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 32
Statement (java.sql.Statement): 31
Test (org.junit.Test): 30
List (java.util.List): 25
Database (org.apache.hadoop.hive.metastore.api.Database): 25
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 25
ResultSet (java.sql.ResultSet): 22
UnknownDBException (org.apache.hadoop.hive.metastore.api.UnknownDBException): 22
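
Most of the classes in the aggregation list above co-occur in ordinary metastore client code. A minimal sketch, assuming a connected IMetaStoreClient named client and placeholder database/table names, of how MetaException is typically told apart from its most frequent companions, NoSuchObjectException and the underlying Thrift TException:

try {
    Table table = client.getTable("default", "some_table");
    System.out.println("Found table: " + table.getTableName());
} catch (NoSuchObjectException e) {
    // The database or table simply does not exist.
} catch (MetaException e) {
    // The metastore itself failed (for example an internal or SQL-level error).
} catch (TException e) {
    // Transport or protocol failure between the client and the metastore.
}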