Example 46 with Database

Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.

In class TestDbNotificationListener, the method cleanupNotifs:

@Test
public void cleanupNotifs() throws Exception {
    Database db = new Database("cleanup1", "no description", "file:/tmp", emptyParameters);
    msClient.createDatabase(db);
    msClient.dropDatabase("cleanup1");
    LOG.info("Pulling events immediately after createDatabase/dropDatabase");
    NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
    assertEquals(2, rsp.getEventsSize());
    // sleep for expiry time, and then fetch again
    // sleep twice the TTL interval - things should have been cleaned by then.
    Thread.sleep(EVENTS_TTL * 2 * 1000);
    LOG.info("Pulling events again after cleanup");
    NotificationEventResponse rsp2 = msClient.getNextNotification(firstEventId, 0, null);
    LOG.info("second trigger done");
    assertEquals(0, rsp2.getEventsSize());
}
Also used: NotificationEventResponse (org.apache.hadoop.hive.metastore.api.NotificationEventResponse), Database (org.apache.hadoop.hive.metastore.api.Database), Test (org.junit.Test)
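
The test above sleeps for twice the configured TTL and then asserts that the cleaner thread has purged both events. A minimal alternative sketch that polls instead of sleeping for a fixed interval; the helper name waitForCleanup, the 500 ms poll interval, and the timeout parameter are assumptions, not part of the original test:

// Hypothetical helper: poll until the metastore's cleaner thread has purged all
// events visible after firstEventId, rather than sleeping for a fixed multiple of the TTL.
private static void waitForCleanup(IMetaStoreClient msClient, long firstEventId, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
        if (rsp.getEventsSize() == 0) {
            // cleaner has removed the expired events
            return;
        }
        Thread.sleep(500);
    }
    throw new AssertionError("notification events were not cleaned up within " + timeoutMs + " ms");
}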

Example 47 with Database

Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.

In class TestHiveMetaTool, the method setUp:

@Override
protected void setUp() throws Exception {
    super.setUp();
    try {
        HiveConf hiveConf = new HiveConf(HiveMetaTool.class);
        client = new HiveMetaStoreClient(hiveConf);
        // Setup output stream to redirect output to
        os = new ByteArrayOutputStream();
        ps = new PrintStream(os);
        // create a dummy database and a couple of dummy tables
        Database db = new Database();
        db.setName(dbName);
        client.dropTable(dbName, tblName);
        client.dropTable(dbName, badTblName);
        dropDatabase(dbName);
        client.createDatabase(db);
        locationUri = db.getLocationUri();
        String avroUri = "hdfs://nn.example.com/warehouse/hive/ab.avsc";
        String badAvroUri = "hdfs:/hive"; // intentionally bad schema URI used by the second table
        client.dropType(typeName);
        Type typ1 = new Type();
        typ1.setName(typeName);
        typ1.setFields(new ArrayList<FieldSchema>(2));
        typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
        typ1.getFields().add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
        client.createType(typ1);
        Table tbl = new Table();
        tbl.setDbName(dbName);
        tbl.setTableName(tblName);
        Map<String, String> parameters = new HashMap<>();
        parameters.put(AvroSerdeUtils.SCHEMA_URL, avroUri);
        tbl.setParameters(parameters);
        StorageDescriptor sd = new StorageDescriptor();
        tbl.setSd(sd);
        sd.setCols(typ1.getFields());
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.getParameters().put("test_param_1", "Use this for comments etc");
        sd.setBucketCols(new ArrayList<String>(2));
        sd.getBucketCols().add("name");
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.getParameters().put(AvroSerdeUtils.SCHEMA_URL, avroUri);
        sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.avro.AvroSerDe.class.getName());
        sd.setInputFormat(AvroContainerInputFormat.class.getName());
        sd.setOutputFormat(AvroContainerOutputFormat.class.getName());
        tbl.setPartitionKeys(new ArrayList<FieldSchema>());
        client.createTable(tbl);
        // create a table with bad avro uri
        tbl = new Table();
        tbl.setDbName(dbName);
        tbl.setTableName(badTblName);
        sd = new StorageDescriptor();
        tbl.setSd(sd);
        sd.setCols(typ1.getFields());
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.getParameters().put("test_param_1", "Use this for comments etc");
        sd.setBucketCols(new ArrayList<String>(2));
        sd.getBucketCols().add("name");
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.getParameters().put(AvroSerdeUtils.SCHEMA_URL, badAvroUri);
        sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.avro.AvroSerDe.class.getName());
        sd.setInputFormat(AvroContainerInputFormat.class.getName());
        sd.setOutputFormat(AvroContainerOutputFormat.class.getName());
        tbl.setPartitionKeys(new ArrayList<FieldSchema>());
        client.createTable(tbl);
        client.close();
    } catch (Exception e) {
        System.err.println("Unable to setup the hive metatool test");
        System.err.println(StringUtils.stringifyException(e));
        throw new Exception(e);
    }
}
Also used: PrintStream (java.io.PrintStream), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), ByteArrayOutputStream (java.io.ByteArrayOutputStream), AvroContainerInputFormat (org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat), InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), Type (org.apache.hadoop.hive.metastore.api.Type), AvroContainerOutputFormat (org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat), Database (org.apache.hadoop.hive.metastore.api.Database), HiveConf (org.apache.hadoop.hive.conf.HiveConf)
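
The two table definitions in setUp differ only in the table name and the Avro schema URL, so the duplicated StorageDescriptor construction could be factored into a helper. A rough refactoring sketch; the method buildAvroTable is hypothetical and not part of TestHiveMetaTool:

// Hypothetical helper that builds an Avro-backed Table; only the name and schema URL
// vary between the two tables created in setUp above.
private static Table buildAvroTable(String dbName, String tableName, List<FieldSchema> cols, String schemaUrl) {
    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tableName);
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.getParameters().put("test_param_1", "Use this for comments etc");
    sd.getParameters().put(AvroSerdeUtils.SCHEMA_URL, schemaUrl);
    sd.setBucketCols(new ArrayList<String>(2));
    sd.getBucketCols().add("name");
    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName(tableName);
    serdeInfo.setParameters(new HashMap<String, String>());
    serdeInfo.getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
    serdeInfo.setSerializationLib(org.apache.hadoop.hive.serde2.avro.AvroSerDe.class.getName());
    sd.setSerdeInfo(serdeInfo);
    sd.setInputFormat(AvroContainerInputFormat.class.getName());
    sd.setOutputFormat(AvroContainerOutputFormat.class.getName());
    tbl.setSd(sd);
    // Note: the original setUp sets SCHEMA_URL in the table-level parameters only for the
    // first table; this sketch does it for both.
    Map<String, String> parameters = new HashMap<>();
    parameters.put(AvroSerdeUtils.SCHEMA_URL, schemaUrl);
    tbl.setParameters(parameters);
    tbl.setPartitionKeys(new ArrayList<FieldSchema>());
    return tbl;
}

With such a helper, the two createTable calls would reduce to client.createTable(buildAvroTable(dbName, tblName, typ1.getFields(), avroUri)) and client.createTable(buildAvroTable(dbName, badTblName, typ1.getFields(), badAvroUri)).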

Example 48 with Database

Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.

In class TestMetaStoreAuthorization, the method testMetaStoreAuthorization:

public void testMetaStoreAuthorization() throws Exception {
    setup();
    MetaStoreTestUtils.startMetaStoreWithRetry(conf);
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    FileSystem fs = null;
    String dbName = "simpdb";
    Database db1 = null;
    Path p = null;
    try {
        try {
            db1 = client.getDatabase(dbName);
            client.dropDatabase(dbName);
        } catch (NoSuchObjectException noe) {
            // the database may not exist yet; nothing to clean up
        }
        if (db1 != null) {
            p = new Path(db1.getLocationUri());
            fs = p.getFileSystem(conf);
            fs.delete(p, true);
        }
        db1 = new Database();
        db1.setName(dbName);
        client.createDatabase(db1);
        Database db = client.getDatabase(dbName);
        assertTrue("Databases do not match", db1.getName().equals(db.getName()));
        p = new Path(db.getLocationUri());
        if (fs == null) {
            fs = p.getFileSystem(conf);
        }
        fs.setPermission(p.getParent(), FsPermission.createImmutable((short) 0555));
        try {
            client.dropDatabase(dbName);
            throw new Exception("Expected dropDatabase call to fail");
        } catch (MetaException me) {
            // expected: dropDatabase must fail while the parent directory is read-only
        }
        fs.setPermission(p.getParent(), FsPermission.createImmutable((short) 0755));
        client.dropDatabase(dbName);
    } finally {
        if (p != null) {
            fs.delete(p, true);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), Database (org.apache.hadoop.hive.metastore.api.Database), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
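
The test signals the expected failure by throwing a plain Exception if dropDatabase unexpectedly succeeds and swallowing the anticipated MetaException. A short sketch of the same check written with JUnit's Assert.fail, purely illustrative and assuming the same client and dbName as above:

    try {
        client.dropDatabase(dbName);
        // reaching this line means the drop succeeded despite the read-only parent directory
        Assert.fail("dropDatabase should fail while the parent directory is read-only");
    } catch (MetaException expected) {
        // expected: the metastore cannot delete the database directory
    }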

Example 49 with Database

Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.

In class TestReplChangeManager, the method testRecyclePartTable:

@Test
public void testRecyclePartTable() throws Exception {
    // Create db1/t1/dt=20160101/part
    //              /dt=20160102/part
    //              /dt=20160103/part
    // Test: recycle single file (dt=20160101/part)
    //       recycle single partition (dt=20160102)
    //       recycle table t1
    String dbName = "db1";
    client.dropDatabase(dbName, true, true);
    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);
    String tblName = "t1";
    List<FieldSchema> columns = new ArrayList<FieldSchema>();
    columns.add(new FieldSchema("foo", "string", ""));
    columns.add(new FieldSchema("bar", "string", ""));
    List<FieldSchema> partColumns = new ArrayList<FieldSchema>();
    partColumns.add(new FieldSchema("dt", "string", ""));
    SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe", LazyBinaryColumnarSerDe.class.getCanonicalName(), new HashMap<String, String>());
    StorageDescriptor sd = new StorageDescriptor(columns, null, "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat", false, 0, serdeInfo, null, null, null);
    Map<String, String> tableParameters = new HashMap<String, String>();
    Table tbl = new Table(tblName, dbName, "", 0, 0, 0, sd, partColumns, tableParameters, "", "", "");
    client.createTable(tbl);
    List<String> values = Arrays.asList("20160101");
    Partition part1 = createPartition(dbName, tblName, columns, values, serdeInfo);
    client.add_partition(part1);
    values = Arrays.asList("20160102");
    Partition part2 = createPartition(dbName, tblName, columns, values, serdeInfo);
    client.add_partition(part2);
    values = Arrays.asList("20160103");
    Partition part3 = createPartition(dbName, tblName, columns, values, serdeInfo);
    client.add_partition(part3);
    Path part1Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
    createFile(part1Path, "p1");
    String path1Chksum = ReplChangeManager.checksumFor(part1Path, fs);
    Path part2Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
    createFile(part2Path, "p2");
    String path2Chksum = ReplChangeManager.checksumFor(part2Path, fs);
    Path part3Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
    createFile(part3Path, "p3");
    String path3Chksum = ReplChangeManager.checksumFor(part3Path, fs);
    assertTrue(part1Path.getFileSystem(hiveConf).exists(part1Path));
    assertTrue(part2Path.getFileSystem(hiveConf).exists(part2Path));
    assertTrue(part3Path.getFileSystem(hiveConf).exists(part3Path));
    ReplChangeManager cm = ReplChangeManager.getInstance(hiveConf);
    // verify cm.recycle(db, table, part) api moves file to cmroot dir
    int ret = cm.recycle(part1Path, RecycleType.MOVE, false);
    Assert.assertEquals(1, ret);
    Path cmPart1Path = ReplChangeManager.getCMPath(hiveConf, part1Path.getName(), path1Chksum);
    assertTrue(cmPart1Path.getFileSystem(hiveConf).exists(cmPart1Path));
    // Verify dropPartition recycle part files
    client.dropPartition(dbName, tblName, Arrays.asList("20160102"));
    assertFalse(part2Path.getFileSystem(hiveConf).exists(part2Path));
    Path cmPart2Path = ReplChangeManager.getCMPath(hiveConf, part2Path.getName(), path2Chksum);
    assertTrue(cmPart2Path.getFileSystem(hiveConf).exists(cmPart2Path));
    // Verify dropTable recycle partition files
    client.dropTable(dbName, tblName);
    assertFalse(part3Path.getFileSystem(hiveConf).exists(part3Path));
    Path cmPart3Path = ReplChangeManager.getCMPath(hiveConf, part3Path.getName(), path3Chksum);
    assertTrue(cmPart3Path.getFileSystem(hiveConf).exists(cmPart3Path));
    client.dropDatabase(dbName, true, true);
}
Also used: Path (org.apache.hadoop.fs.Path), Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), HashMap (java.util.HashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), ArrayList (java.util.ArrayList), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), Database (org.apache.hadoop.hive.metastore.api.Database), LazyBinaryColumnarSerDe (org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe), Test (org.junit.Test)
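
The createPartition helper called three times above is defined elsewhere in TestReplChangeManager and is not shown here. A rough, hypothetical sketch of what such a helper might look like; the actual implementation may differ, in particular in how it sets the partition location and parameters:

// Hypothetical sketch only: builds a Partition for the given values, reusing the
// column list and SerDeInfo of the table. The real helper in TestReplChangeManager
// presumably also sets the partition's storage location on the StorageDescriptor.
private Partition createPartition(String dbName, String tblName, List<FieldSchema> columns, List<String> partVals, SerDeInfo serdeInfo) {
    StorageDescriptor sd = new StorageDescriptor(columns, null,
        "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat",
        "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat",
        false, 0, serdeInfo, null, null, null);
    Partition part = new Partition();
    part.setDbName(dbName);
    part.setTableName(tblName);
    part.setValues(partVals);
    part.setSd(sd);
    part.setParameters(new HashMap<String, String>());
    return part;
}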

Example 50 with Database

Use of org.apache.hadoop.hive.metastore.api.Database in project hive by apache.

In class TestReplicationScenarios, the method testDatabaseAlters:

@Test
public void testDatabaseAlters() throws IOException {
    String testName = "DatabaseAlters";
    String dbName = createDB(testName, driver);
    String replDbName = dbName + "_dupe";
    String ownerName = "test";
    run("ALTER DATABASE " + dbName + " SET OWNER USER " + ownerName, driver);
    // Trigger bootstrap replication
    Tuple bootstrap = bootstrapLoadAndVerify(dbName, replDbName);
    try {
        Database replDb = metaStoreClientMirror.getDatabase(replDbName);
        assertEquals(ownerName, replDb.getOwnerName());
        assertEquals("USER", replDb.getOwnerType().toString());
    } catch (TException e) {
        assertNull(e);
    }
    // Alter database set DB property
    String testKey = "blah";
    String testVal = "foo";
    run("ALTER DATABASE " + dbName + " SET DBPROPERTIES ('" + testKey + "' = '" + testVal + "')", driver);
    // All alters done, now we replicate them over.
    Tuple incremental = incrementalLoadAndVerify(dbName, bootstrap.lastReplId, replDbName);
    // Replication done, we need to check if the new property is added
    try {
        Database replDb = metaStoreClientMirror.getDatabase(replDbName);
        assertTrue(replDb.getParameters().containsKey(testKey));
        assertEquals(testVal, replDb.getParameters().get(testKey));
    } catch (TException e) {
        assertNull(e);
    }
    String newValue = "newFoo";
    String newOwnerName = "newTest";
    run("ALTER DATABASE " + dbName + " SET DBPROPERTIES ('" + testKey + "' = '" + newValue + "')", driver);
    run("ALTER DATABASE " + dbName + " SET OWNER ROLE " + newOwnerName, driver);
    incremental = incrementalLoadAndVerify(dbName, incremental.lastReplId, replDbName);
    // Replication done, we need to check if new value is set for existing property
    try {
        Database replDb = metaStoreClientMirror.getDatabase(replDbName);
        assertTrue(replDb.getParameters().containsKey(testKey));
        assertEquals(newValue, replDb.getParameters().get(testKey));
        assertEquals(newOwnerName, replDb.getOwnerName());
        assertEquals("ROLE", replDb.getOwnerType().toString());
    } catch (TException e) {
        assertNull(e);
    }
}
Also used: TException (org.apache.thrift.TException), Database (org.apache.hadoop.hive.metastore.api.Database), Test (org.junit.Test)
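
The owner and property changes above are issued as HiveQL ALTER DATABASE statements and then verified through the replicated Database object. For comparison, the same updates can be made directly against the metastore client, as in this rough sketch (assuming an IMetaStoreClient named client and the property key/value used in the test):

    // Sketch: update a DB property and the owner through the metastore API directly,
    // rather than via ALTER DATABASE statements (illustrative, not taken from the test).
    Database db = client.getDatabase(dbName);
    db.putToParameters("blah", "foo");
    db.setOwnerName("test");
    db.setOwnerType(PrincipalType.USER);
    client.alterDatabase(dbName, db);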

Aggregations

Database (org.apache.hadoop.hive.metastore.api.Database): 236
Test (org.junit.Test): 107
Table (org.apache.hadoop.hive.metastore.api.Table): 70
ArrayList (java.util.ArrayList): 51
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest): 39
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 39
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 37
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 36
Partition (org.apache.hadoop.hive.metastore.api.Partition): 35
Path (org.apache.hadoop.fs.Path): 34
IOException (java.io.IOException): 29
HashMap (java.util.HashMap): 27
DatabaseBuilder (org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder): 26
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 24
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 23
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 22
TableBuilder (org.apache.hadoop.hive.metastore.client.builder.TableBuilder): 22
TException (org.apache.thrift.TException): 21
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 20
FileSystem (org.apache.hadoop.fs.FileSystem): 17