Example 61 with PartitionSpecProxy

Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.

The class TestAddPartitionsFromPartSpec, method testAddPartitionSpecWithSharedSDWithoutRelativePath.

@Test
public void testAddPartitionSpecWithSharedSDWithoutRelativePath() throws Exception {
    Table table = createTable();
    PartitionWithoutSD partition = buildPartitionWithoutSD(Lists.newArrayList("2014"), 0);
    partition.setRelativePath(null);
    String location = table.getSd().getLocation() + "/sharedSDTest/";
    PartitionSpecProxy partitionSpecProxy = buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(location));
    client.add_partitions_pspec(partitionSpecProxy);
    Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2014");
    Assert.assertNotNull(part);
    Assert.assertEquals(table.getSd().getLocation() + "/sharedSDTest/null", part.getSd().getLocation());
    Assert.assertTrue(metaStore.isPathExists(new Path(part.getSd().getLocation())));
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionWithoutSD(org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) PartitionSpecProxy(org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
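
The calls to buildPartitionWithoutSD, buildSD, and buildPartitionSpecWithSharedSD above are private helpers of TestAddPartitionsFromPartSpec that are not shown here. A minimal sketch of how a shared-SD PartitionSpecProxy could be assembled directly from the Thrift-generated metastore types instead; the setter names follow the generated API, and the table, client, DB_NAME, and TABLE_NAME variables are assumed from the test context:

import java.util.Collections;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

// One partition ("year=2014") sharing a single StorageDescriptor.
PartitionWithoutSD partition = new PartitionWithoutSD();
partition.setValues(Collections.singletonList("2014"));
// relativePath is resolved against the shared SD location; leaving it null is what
// produces the ".../sharedSDTest/null" location asserted in the test above.
partition.setRelativePath(null);

// Copy the table's SD and point it at the shared location.
StorageDescriptor sharedSd = new StorageDescriptor(table.getSd());
sharedSd.setLocation(table.getSd().getLocation() + "/sharedSDTest/");

PartitionSpecWithSharedSD sharedSDSpec = new PartitionSpecWithSharedSD();
sharedSDSpec.setSd(sharedSd);
sharedSDSpec.setPartitions(Collections.singletonList(partition));

PartitionSpec spec = new PartitionSpec();
spec.setDbName(DB_NAME);
spec.setTableName(TABLE_NAME);
spec.setSharedSDPartitionSpec(sharedSDSpec);

PartitionSpecProxy partitionSpecProxy =
    PartitionSpecProxy.Factory.get(Collections.singletonList(spec));
client.add_partitions_pspec(partitionSpecProxy);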

Example 62 with PartitionSpecProxy

Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.

The class TestAddPartitionsFromPartSpec, method testAddPartitionSpecNonExistingTable.

@Test(expected = InvalidObjectException.class)
public void testAddPartitionSpecNonExistingTable() throws Exception {
    createTable();
    Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
    PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, "nonexistingtable", null, Lists.newArrayList(partition));
    client.add_partitions_pspec(partitionSpecProxy);
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) PartitionSpecProxy(org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)

Example 63 with PartitionSpecProxy

Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.

The class HMSHandler, method add_partitions_pspec_core.

private int add_partitions_pspec_core(RawStore ms, String catName, String dbName, String tblName, List<PartitionSpec> partSpecs, boolean ifNotExists) throws TException {
    boolean success = false;
    if (dbName == null || tblName == null) {
        throw new MetaException("The database and table name cannot be null.");
    }
    // Ensures that the list doesn't have dups, and keeps track of directories we have created.
    final Map<PartValEqWrapperLite, Boolean> addedPartitions = new ConcurrentHashMap<>();
    PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(partSpecs);
    final PartitionSpecProxy.PartitionIterator partitionIterator = partitionSpecProxy.getPartitionIterator();
    Table tbl = null;
    Map<String, String> transactionalListenerResponses = Collections.emptyMap();
    Database db = null;
    Lock tableLock = getTableLockFor(dbName, tblName);
    tableLock.lock();
    try {
        ms.openTransaction();
        try {
            db = ms.getDatabase(catName, dbName);
        } catch (NoSuchObjectException notExists) {
            throw new InvalidObjectException("Unable to add partitions because " + "database or table " + dbName + "." + tblName + " does not exist");
        }
        if (db.getType() == DatabaseType.REMOTE) {
            throw new MetaException("Operation add_partitions_pspec not supported on tables in REMOTE database");
        }
        tbl = ms.getTable(catName, dbName, tblName, null);
        if (tbl == null) {
            throw new InvalidObjectException("Unable to add partitions because " + "database or table " + dbName + "." + tblName + " does not exist");
        }
        firePreEvent(new PreAddPartitionEvent(tbl, partitionSpecProxy, this));
        Set<PartValEqWrapperLite> partsToAdd = new HashSet<>(partitionSpecProxy.size());
        List<Partition> partitionsToAdd = new ArrayList<>(partitionSpecProxy.size());
        List<FieldSchema> partitionKeys = tbl.getPartitionKeys();
        while (partitionIterator.hasNext()) {
            // Iterate through the partitions and validate them. If one of the partitions is
            // incorrect, an exception will be thrown before the threads which create the partition
            // folders are submitted. This way we can be sure that no partition or partition folder
            // will be created if the list contains an invalid partition.
            final Partition part = partitionIterator.getCurrent();
            if (validatePartition(part, catName, tblName, dbName, partsToAdd, ms, ifNotExists, partitionKeys)) {
                partitionsToAdd.add(part);
            }
            partitionIterator.next();
        }
        createPartitionFolders(partitionsToAdd, tbl, addedPartitions);
        ms.addPartitions(catName, dbName, tblName, partitionSpecProxy, ifNotExists);
        if (!transactionalListeners.isEmpty()) {
            transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PARTITION, new AddPartitionEvent(tbl, partitionSpecProxy, true, this));
        }
        success = ms.commitTransaction();
        return addedPartitions.size();
    } finally {
        try {
            if (!success) {
                ms.rollbackTransaction();
                cleanupPartitionFolders(addedPartitions, db);
            }
            if (!listeners.isEmpty()) {
                MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, new AddPartitionEvent(tbl, partitionSpecProxy, true, this), null, transactionalListenerResponses, ms);
            }
        } finally {
            tableLock.unlock();
        }
    }
}
Also used : ArrayList(java.util.ArrayList) Lock(java.util.concurrent.locks.Lock) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) PartitionSpecProxy(org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy) ExceptionHandler.newMetaException(org.apache.hadoop.hive.metastore.ExceptionHandler.newMetaException) ExceptionHandler.throwMetaException(org.apache.hadoop.hive.metastore.ExceptionHandler.throwMetaException) HashSet(java.util.HashSet)
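
add_partitions_pspec_core is the server-side path behind the client calls shown in the test examples: every partition is validated before any folder is created, the folders are then created, and the whole batch is persisted in a single metastore transaction, with the created folders cleaned up on rollback. A rough sketch of a matching client-side call using the list-composing form of PartitionSpec; buildPartition stands in for the test helper used in the examples above, and the Thrift setter names are assumptions based on the generated API:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;

// Fully specified partitions, each carrying its own StorageDescriptor.
Partition p2017 = buildPartition(DB_NAME, TABLE_NAME, "2017");
Partition p2018 = buildPartition(DB_NAME, TABLE_NAME, "2018");

PartitionListComposingSpec listSpec = new PartitionListComposingSpec();
listSpec.setPartitions(Arrays.asList(p2017, p2018));

PartitionSpec spec = new PartitionSpec();
spec.setDbName(DB_NAME);
spec.setTableName(TABLE_NAME);
spec.setPartitionList(listSpec);

// The HMSHandler method above rebuilds a proxy from the received PartitionSpec list
// with PartitionSpecProxy.Factory.get(partSpecs) and iterates it for validation.
PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(Arrays.asList(spec));
client.add_partitions_pspec(partitionSpecProxy);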

Example 64 with PartitionSpecProxy

Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.

The class TestHiveMetaStorePartitionSpecs, method testGetPartitionSpecs.

private void testGetPartitionSpecs(boolean enablePartitionGrouping) {
    try {
        HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf);
        clearAndRecreateDB(hmsc);
        createTable(hmsc, enablePartitionGrouping);
        Table table = hmsc.getTable(dbName, tableName);
        populatePartitions(hmsc, table, Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath"));
        PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
        Assert.assertEquals("Unexpected number of partitions.", nDates * 2, partitionSpecProxy.size());
        Map<String, List<String>> locationToDateMap = new HashMap<>();
        locationToDateMap.put("isLocatedInTablePath", new ArrayList<>());
        locationToDateMap.put("isLocatedOutsideTablePath", new ArrayList<>());
        PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
        while (iterator.hasNext()) {
            Partition partition = iterator.next();
            locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
        }
        List<String> expectedDates = new ArrayList<>(nDates);
        for (int i = 0; i < nDates; ++i) {
            expectedDates.add(datePrefix + i);
        }
        Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedInTablePath").toArray());
        Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
        partitionSpecProxy = hmsc.listPartitionSpecsByFilter(dbName, tableName, "blurb = \"isLocatedOutsideTablePath\"", -1);
        locationToDateMap.get("isLocatedInTablePath").clear();
        locationToDateMap.get("isLocatedOutsideTablePath").clear();
        iterator = partitionSpecProxy.getPartitionIterator();
        while (iterator.hasNext()) {
            Partition partition = iterator.next();
            locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
        }
        Assert.assertEquals("Unexpected date-values.", 0, locationToDateMap.get("isLocatedInTablePath").size());
        Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
    } catch (Throwable t) {
        LOG.error("Unexpected Exception!", t);
        t.printStackTrace();
        Assert.assertTrue("Unexpected Exception!", false);
    }
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) PartitionSpecProxy(org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy) CompositePartitionSpecProxy(org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy)

Example 65 with PartitionSpecProxy

Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.

The class TestListPartitions, method testListPartitionSpecs.

/**
 * Testing listPartitionSpecs(String,String,int) ->
 *         get_partitions_pspec(String,String,int).
 */
@Test
public void testListPartitionSpecs() throws Exception {
    List<List<String>> testValues = createTable4PartColsParts(client).testValues;
    PartitionSpecProxy partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, -1);
    assertPartitionsSpecProxy(partSpecProxy, testValues);
    partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 2);
    assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 2));
    partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 0);
    assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 0));
}
Also used : ArrayList(java.util.ArrayList) List(java.util.List) PartitionSpecProxy(org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)
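
assertPartitionsSpecProxy is a helper of TestListPartitions that is not shown in this example. A plausible minimal version, assuming it only compares the partition values reachable through the proxy's iterator with the expected value lists, could look like this:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.junit.Assert;

// Walk the proxy and compare each partition's value list with the expected values.
private static void assertPartitionsSpecProxy(PartitionSpecProxy proxy,
        List<List<String>> expectedValues) {
    Assert.assertEquals(expectedValues.size(), proxy.size());
    List<List<String>> actualValues = new ArrayList<>();
    PartitionSpecProxy.PartitionIterator iterator = proxy.getPartitionIterator();
    while (iterator.hasNext()) {
        Partition partition = iterator.next();
        actualValues.add(partition.getValues());
    }
    Assert.assertEquals(expectedValues, actualValues);
}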

Aggregations

PartitionSpecProxy (org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy) 69
Test (org.junit.Test) 60
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest) 59
Partition (org.apache.hadoop.hive.metastore.api.Partition) 53
Table (org.apache.hadoop.hive.metastore.api.Table) 24
PartitionWithoutSD (org.apache.hadoop.hive.metastore.api.PartitionWithoutSD) 15
ArrayList (java.util.ArrayList) 13
Path (org.apache.hadoop.fs.Path) 11
List (java.util.List) 7
MetaException (org.apache.hadoop.hive.metastore.api.MetaException) 7
HashMap (java.util.HashMap) 4
PartitionSpec (org.apache.hadoop.hive.metastore.api.PartitionSpec) 4
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema) 3
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor) 3
PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) 3
CompositePartitionSpecProxy (org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy) 3
PartitionSpecWithSharedSD (org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD) 2
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) 2
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) 2
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) 2