Example 21 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class AcidTableSerializer, method encode.

/** Returns a base 64 encoded representation of the supplied {@link AcidTable}. */
public static String encode(AcidTable table) throws IOException {
    DataOutputStream data = null;
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try {
        data = new DataOutputStream(bytes);
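        // Field order below defines the V1 payload layout; a reader must consume fields in the same order.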
        data.writeUTF(table.getDatabaseName());
        data.writeUTF(table.getTableName());
        data.writeBoolean(table.createPartitions());
        if (table.getTransactionId() <= 0) {
            LOG.warn("Transaction ID <= 0. The recipient is probably expecting a transaction ID.");
        }
        data.writeLong(table.getTransactionId());
        data.writeByte(table.getTableType().getId());
        Table metaTable = table.getTable();
        if (metaTable != null) {
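            // Length-prefix the Thrift-encoded metastore Table so a reader knows how many bytes to consume.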
            byte[] thrift = new TSerializer(new TCompactProtocol.Factory()).serialize(metaTable);
            data.writeInt(thrift.length);
            data.write(thrift);
        } else {
            LOG.warn("Meta store table is null. The recipient is probably expecting an instance.");
            data.writeInt(0);
        }
    } catch (TException e) {
        throw new IOException("Error serializing meta store table.", e);
    } finally {
        data.close();
    }
    return PROLOG_V1 + new String(Base64.encodeBase64(bytes.toByteArray()), Charset.forName("UTF-8"));
}
Also used: TException (org.apache.thrift.TException), TSerializer (org.apache.thrift.TSerializer), Table (org.apache.hadoop.hive.metastore.api.Table), DataOutputStream (java.io.DataOutputStream), LoggerFactory (org.slf4j.LoggerFactory), ByteArrayOutputStream (java.io.ByteArrayOutputStream), IOException (java.io.IOException)
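
For reference, the payload written above can be read back by consuming the fields in the same order. The following is a minimal decode sketch, not the project's actual decoder: TableType.fromId, AcidTable.setTransactionId, and AcidTable.setTable are assumed helpers, and java.io.DataInputStream, java.io.ByteArrayInputStream, and org.apache.thrift.TDeserializer are needed in addition to the classes listed above.

/** A hedged sketch of the reverse of encode; the field order must match exactly. */
public static AcidTable decodeSketch(String encoded) throws IOException {
    // Strip the version prolog, then undo the base 64 encoding.
    byte[] bytes = Base64.decodeBase64(encoded.substring(PROLOG_V1.length()));
    try (DataInputStream data = new DataInputStream(new ByteArrayInputStream(bytes))) {
        String databaseName = data.readUTF();
        String tableName = data.readUTF();
        boolean createPartitions = data.readBoolean();
        long transactionId = data.readLong();
        // Assumed lookup-by-id counterpart of the TableType.getId() call in encode.
        TableType tableType = TableType.fromId(data.readByte());
        AcidTable acidTable = new AcidTable(databaseName, tableName, createPartitions, tableType);
        acidTable.setTransactionId(transactionId); // assumed setter
        int thriftLength = data.readInt();
        if (thriftLength > 0) {
            byte[] thrift = new byte[thriftLength];
            data.readFully(thrift);
            Table metaTable = new Table();
            new TDeserializer(new TCompactProtocol.Factory()).deserialize(metaTable, thrift);
            acidTable.setTable(metaTable); // assumed setter
        }
        return acidTable;
    } catch (TException e) {
        throw new IOException("Error deserializing meta store table.", e);
    }
}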

Example 22 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class MutatorClientBuilder, method addTable.

private void addTable(String databaseName, String tableName, boolean createPartitions, TableType tableType) {
    if (databaseName == null) {
        throw new IllegalArgumentException("Database cannot be null");
    }
    if (tableName == null) {
        throw new IllegalArgumentException("Table cannot be null");
    }
    String key = (databaseName + "." + tableName).toUpperCase();
    AcidTable previous = tables.get(key);
    if (previous != null) {
        if (tableType == TableType.SINK && previous.getTableType() != TableType.SINK) {
            tables.remove(key);
        } else {
            throw new IllegalArgumentException("Table has already been added: " + databaseName + "." + tableName);
        }
    }
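    // Note: this metastore Table instance is constructed but never used below; only the AcidTable is registered.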
    Table table = new Table();
    table.setDbName(databaseName);
    table.setTableName(tableName);
    tables.put(key, new AcidTable(databaseName, tableName, createPartitions, tableType));
}
Also used: Table (org.apache.hadoop.hive.metastore.api.Table)
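
Because SINK registrations take precedence, a later sink registration for the same table replaces an earlier non-sink registration rather than failing. A small usage sketch follows; the addSourceTable variant, the table names, and the metastore URI are assumptions, while addSinkTable appears in the tests further below.

MutatorClient client = new MutatorClientBuilder()
    .metaStoreUri("thrift://metastore:9083")     // assumed metastore location
    .addSourceTable("test_db", "orders")         // assumed source-registering counterpart of addTable
    .addSinkTable("test_db", "orders", true)     // replaces the source entry: SINK takes precedence
    .build();
// Registering the same table twice in the same role would instead throw the
// IllegalArgumentException raised in addTable above.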

Example 23 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class MetadataJSONSerializer, method deserializeTable.

@Override
public HCatTable deserializeTable(String hcatTableStringRep) throws HCatException {
    try {
        Table table = new Table();
        new TDeserializer(new TJSONProtocol.Factory()).deserialize(table, hcatTableStringRep, "UTF-8");
        return new HCatTable(table);
    } catch (TException exception) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Could not de-serialize from: " + hcatTableStringRep);
        }
        throw new HCatException("Could not de-serialize HCatTable.", exception);
    }
}
Also used: TException (org.apache.thrift.TException), Table (org.apache.hadoop.hive.metastore.api.Table), TDeserializer (org.apache.thrift.TDeserializer), HCatException (org.apache.hive.hcatalog.common.HCatException), LoggerFactory (org.slf4j.LoggerFactory)
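
A JSON deserializer implies a matching encoder. Here is a minimal sketch of the reverse direction; the method name serializeTable and the use of a raw metastore Table are assumptions, since the excerpt confirms only the deserializing half.

public String serializeTable(Table table) throws HCatException {
    try {
        // TSerializer with TJSONProtocol is the inverse of the TDeserializer call above.
        byte[] json = new TSerializer(new TJSONProtocol.Factory()).serialize(table);
        return new String(json, Charset.forName("UTF-8"));
    } catch (TException exception) {
        throw new HCatException("Could not serialize Table.", exception);
    }
}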

Example 24 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class TestMutations, method testMulti.

@Test
public void testMulti() throws Exception {
    Table table = partitionedTableBuilder.addPartition(ASIA_INDIA).create(metaStoreClient);
    MutatorClient client = new MutatorClientBuilder()
        .addSinkTable(table.getDbName(), table.getTableName(), true)
        .metaStoreUri(metaStoreUri)
        .build();
    client.connect();
    Transaction transaction = client.newTransaction();
    List<AcidTable> destinations = client.getTables();
    transaction.begin();
    MutatorFactory mutatorFactory = new ReflectiveMutatorFactory(conf, MutableRecord.class, RECORD_ID_COLUMN, BUCKET_COLUMN_INDEXES);
    MutatorCoordinator coordinator = new MutatorCoordinatorBuilder()
        .metaStoreUri(metaStoreUri)
        .table(destinations.get(0))
        .mutatorFactory(mutatorFactory)
        .build();
    BucketIdResolver bucketIdResolver = mutatorFactory.newBucketIdResolver(destinations.get(0).getTotalBuckets());
    MutableRecord asiaIndiaRecord1 = (MutableRecord) bucketIdResolver.attachBucketIdToRecord(new MutableRecord(1, "Hello streaming"));
    MutableRecord europeUkRecord1 = (MutableRecord) bucketIdResolver.attachBucketIdToRecord(new MutableRecord(2, "Hello streaming"));
    MutableRecord europeFranceRecord1 = (MutableRecord) bucketIdResolver.attachBucketIdToRecord(new MutableRecord(3, "Hello streaming"));
    MutableRecord europeFranceRecord2 = (MutableRecord) bucketIdResolver.attachBucketIdToRecord(new MutableRecord(4, "Bonjour streaming"));
    coordinator.insert(ASIA_INDIA, asiaIndiaRecord1);
    coordinator.insert(EUROPE_UK, europeUkRecord1);
    coordinator.insert(EUROPE_FRANCE, europeFranceRecord1);
    coordinator.insert(EUROPE_FRANCE, europeFranceRecord2);
    coordinator.close();
    transaction.commit();
    // ASIA_INDIA
    StreamingAssert streamingAssertions = assertionFactory.newStreamingAssert(table, ASIA_INDIA);
    streamingAssertions.assertMinTransactionId(1L);
    streamingAssertions.assertMaxTransactionId(1L);
    streamingAssertions.assertExpectedFileCount(1);
    List<Record> readRecords = streamingAssertions.readRecords();
    assertThat(readRecords.size(), is(1));
    assertThat(readRecords.get(0).getRow(), is("{1, Hello streaming}"));
    assertThat(readRecords.get(0).getRecordIdentifier(), is(new RecordIdentifier(1L, 0, 0L)));
    // EUROPE_UK
    streamingAssertions = assertionFactory.newStreamingAssert(table, EUROPE_UK);
    streamingAssertions.assertMinTransactionId(1L);
    streamingAssertions.assertMaxTransactionId(1L);
    streamingAssertions.assertExpectedFileCount(1);
    readRecords = streamingAssertions.readRecords();
    assertThat(readRecords.size(), is(1));
    assertThat(readRecords.get(0).getRow(), is("{2, Hello streaming}"));
    assertThat(readRecords.get(0).getRecordIdentifier(), is(new RecordIdentifier(1L, 0, 0L)));
    // EUROPE_FRANCE
    streamingAssertions = assertionFactory.newStreamingAssert(table, EUROPE_FRANCE);
    streamingAssertions.assertMinTransactionId(1L);
    streamingAssertions.assertMaxTransactionId(1L);
    streamingAssertions.assertExpectedFileCount(1);
    readRecords = streamingAssertions.readRecords();
    assertThat(readRecords.size(), is(2));
    assertThat(readRecords.get(0).getRow(), is("{3, Hello streaming}"));
    assertThat(readRecords.get(0).getRecordIdentifier(), is(new RecordIdentifier(1L, 0, 0L)));
    assertThat(readRecords.get(1).getRow(), is("{4, Bonjour streaming}"));
    assertThat(readRecords.get(1).getRecordIdentifier(), is(new RecordIdentifier(1L, 0, 1L)));
    client.close();
}
Also used: AcidTable (org.apache.hive.hcatalog.streaming.mutate.client.AcidTable), Table (org.apache.hadoop.hive.metastore.api.Table), MutatorCoordinator (org.apache.hive.hcatalog.streaming.mutate.worker.MutatorCoordinator), RecordIdentifier (org.apache.hadoop.hive.ql.io.RecordIdentifier), MutatorFactory (org.apache.hive.hcatalog.streaming.mutate.worker.MutatorFactory), Transaction (org.apache.hive.hcatalog.streaming.mutate.client.Transaction), MutatorCoordinatorBuilder (org.apache.hive.hcatalog.streaming.mutate.worker.MutatorCoordinatorBuilder), BucketIdResolver (org.apache.hive.hcatalog.streaming.mutate.worker.BucketIdResolver), Record (org.apache.hive.hcatalog.streaming.mutate.StreamingAssert.Record), MutatorClient (org.apache.hive.hcatalog.streaming.mutate.client.MutatorClient), MutatorClientBuilder (org.apache.hive.hcatalog.streaming.mutate.client.MutatorClientBuilder), Test (org.junit.Test)
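
testMulti exercises only insert, but the mutate API also supports updates and deletes through the same coordinator. The sketch below is a hypothetical continuation, assuming MutatorCoordinator exposes update and delete with the same partition-values-plus-record signature as insert, and that a mutated record carries the RecordIdentifier of the row it targets; it would also need to run inside a new transaction rather than after the commit above.

// Hypothetical follow-up mutation (all signatures assumed):
MutableRecord updatedRecord = (MutableRecord) bucketIdResolver
    .attachBucketIdToRecord(new MutableRecord(1, "Hello again, streaming"));
// updatedRecord's RecordIdentifier field would be set to the identifier read
// back earlier, e.g. new RecordIdentifier(1L, 0, 0L), so the coordinator can
// locate the existing row in the delta files.
coordinator.update(ASIA_INDIA, updatedRecord);
coordinator.delete(EUROPE_UK, europeUkRecord1);
coordinator.close();
transaction.commit();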

Example 25 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class TestMutations, method testTransactionBatchAbort.

@Test
public void testTransactionBatchAbort() throws Exception {
    Table table = partitionedTableBuilder.addPartition(ASIA_INDIA).create(metaStoreClient);
    MutatorClient client = new MutatorClientBuilder()
        .addSinkTable(table.getDbName(), table.getTableName(), true)
        .metaStoreUri(metaStoreUri)
        .build();
    client.connect();
    Transaction transaction = client.newTransaction();
    List<AcidTable> destinations = client.getTables();
    transaction.begin();
    MutatorFactory mutatorFactory = new ReflectiveMutatorFactory(conf, MutableRecord.class, RECORD_ID_COLUMN, BUCKET_COLUMN_INDEXES);
    MutatorCoordinator coordinator = new MutatorCoordinatorBuilder()
        .metaStoreUri(metaStoreUri)
        .table(destinations.get(0))
        .mutatorFactory(mutatorFactory)
        .build();
    BucketIdResolver bucketIdResolver = mutatorFactory.newBucketIdResolver(destinations.get(0).getTotalBuckets());
    MutableRecord record1 = (MutableRecord) bucketIdResolver.attachBucketIdToRecord(new MutableRecord(1, "Hello streaming"));
    MutableRecord record2 = (MutableRecord) bucketIdResolver.attachBucketIdToRecord(new MutableRecord(2, "Welcome to streaming"));
    coordinator.insert(ASIA_INDIA, record1);
    coordinator.insert(ASIA_INDIA, record2);
    coordinator.close();
    transaction.abort();
    assertThat(transaction.getState(), is(ABORTED));
    client.close();
    StreamingAssert streamingAssertions = assertionFactory.newStreamingAssert(table, ASIA_INDIA);
    streamingAssertions.assertNothingWritten();
}
Also used: AcidTable (org.apache.hive.hcatalog.streaming.mutate.client.AcidTable), Table (org.apache.hadoop.hive.metastore.api.Table), MutatorCoordinator (org.apache.hive.hcatalog.streaming.mutate.worker.MutatorCoordinator), MutatorFactory (org.apache.hive.hcatalog.streaming.mutate.worker.MutatorFactory), Transaction (org.apache.hive.hcatalog.streaming.mutate.client.Transaction), MutatorCoordinatorBuilder (org.apache.hive.hcatalog.streaming.mutate.worker.MutatorCoordinatorBuilder), BucketIdResolver (org.apache.hive.hcatalog.streaming.mutate.worker.BucketIdResolver), MutatorClient (org.apache.hive.hcatalog.streaming.mutate.client.MutatorClient), MutatorClientBuilder (org.apache.hive.hcatalog.streaming.mutate.client.MutatorClientBuilder), Test (org.junit.Test)

Aggregations

Table (org.apache.hadoop.hive.metastore.api.Table): 355
Test (org.junit.Test): 198
ArrayList (java.util.ArrayList): 169
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 143
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 139
Partition (org.apache.hadoop.hive.metastore.api.Partition): 126
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 123
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 56
HashMap (java.util.HashMap): 55
Path (org.apache.hadoop.fs.Path): 53
Database (org.apache.hadoop.hive.metastore.api.Database): 53
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 48
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 48
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 48
TException (org.apache.thrift.TException): 41
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 40
List (java.util.List): 38
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 35
FileSystem (org.apache.hadoop.fs.FileSystem): 31
ColumnStatisticsDesc (org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc): 31