
Example 41 with Cache

use of com.google.common.cache.Cache in project calcite-avatica by apache.

In the class RemoteMetaTest, the method getConnection reads JdbcMeta's private connection cache via reflection:

private static Connection getConnection(JdbcMeta m, String id) throws Exception {
    // Read JdbcMeta's private "connectionCache" field via reflection,
    // since it is not exposed through the public API.
    Field f = JdbcMeta.class.getDeclaredField("connectionCache");
    f.setAccessible(true);
    //noinspection unchecked
    Cache<String, Connection> connectionCache = (Cache<String, Connection>) f.get(m);
    // getIfPresent returns null when no connection is cached under this id.
    return connectionCache.getIfPresent(id);
}
Also used : Field(java.lang.reflect.Field) HttpURLConnection(java.net.HttpURLConnection) Connection(java.sql.Connection) AvaticaConnection(org.apache.calcite.avatica.AvaticaConnection) StringContains.containsString(org.hamcrest.core.StringContains.containsString) Cache(com.google.common.cache.Cache)
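For context, a minimal sketch of how a connection cache like the one accessed above might be built with Guava's CacheBuilder. The class name and eviction settings are illustrative assumptions, not JdbcMeta's actual configuration (JdbcMeta derives its cache settings from connection properties):

import java.sql.Connection;
import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;

public class ConnectionCacheSketch {
    // Illustrative settings only; the real cache is configured elsewhere.
    private final Cache<String, Connection> connectionCache = CacheBuilder.newBuilder()
            .maximumSize(100)
            .expireAfterAccess(10, TimeUnit.MINUTES)
            // Close a connection when its entry is evicted.
            .removalListener((RemovalListener<String, Connection>) notification -> {
                try {
                    Connection c = notification.getValue();
                    if (c != null) {
                        c.close();
                    }
                } catch (Exception ignored) {
                    // best-effort cleanup
                }
            })
            .build();

    public Connection lookup(String id) {
        // Unlike a LoadingCache, getIfPresent never computes a value: it
        // returns null on a miss, which is what the test above relies on.
        return connectionCache.getIfPresent(id);
    }
}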

Example 42 with Cache

use of com.google.common.cache.Cache in project hive by apache.

In the class AcidUtils, the method makeLockComponents builds lock requests from a query's read and write entities:

/**
 * Create lock components from write/read entities.
 * @param outputs write entities
 * @param inputs read entities
 * @param operation the operation being performed (e.g. MERGE)
 * @param conf Hive configuration
 * @return list with lock components
 */
public static List<LockComponent> makeLockComponents(Set<WriteEntity> outputs, Set<ReadEntity> inputs, Context.Operation operation, HiveConf conf) {
    List<LockComponent> lockComponents = new ArrayList<>();
    boolean skipReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_READ_LOCKS);
    boolean skipNonAcidReadLock = !conf.getBoolVar(ConfVars.HIVE_TXN_NONACID_READ_LOCKS);
    boolean sharedWrite = !conf.getBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK);
    boolean isMerge = operation == Context.Operation.MERGE;
    // We don't want to acquire read locks during update or delete as we'll be acquiring write
    // locks instead. Also, there's no need to lock temp tables since they're session wide
    List<ReadEntity> readEntities = inputs.stream().filter(input -> !input.isDummy() && input.needsLock() && !input.isUpdateOrDelete() && AcidUtils.needsLock(input) && !skipReadLock).collect(Collectors.toList());
    Set<Table> fullTableLock = getFullTableLock(readEntities, conf);
    // For each source to read, get a shared_read lock
    for (ReadEntity input : readEntities) {
        LockComponentBuilder compBuilder = new LockComponentBuilder();
        compBuilder.setSharedRead();
        compBuilder.setOperationType(DataOperationType.SELECT);
        Table t = null;
        switch(input.getType()) {
            case DATABASE:
                compBuilder.setDbName(input.getDatabase().getName());
                break;
            case TABLE:
                t = input.getTable();
                if (!fullTableLock.contains(t)) {
                    continue;
                }
                compBuilder.setDbName(t.getDbName());
                compBuilder.setTableName(t.getTableName());
                break;
            case PARTITION:
            case DUMMYPARTITION:
                compBuilder.setPartitionName(input.getPartition().getName());
                t = input.getPartition().getTable();
                if (fullTableLock.contains(t)) {
                    continue;
                }
                compBuilder.setDbName(t.getDbName());
                compBuilder.setTableName(t.getTableName());
                break;
            default:
                // This is a file or something we don't hold locks for.
                continue;
        }
        if (skipNonAcidReadLock && !AcidUtils.isTransactionalTable(t)) {
            // read-locks don't protect non-transactional tables data consistency
            continue;
        }
        if (t != null) {
            compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
        }
        LockComponent comp = compBuilder.build();
        LOG.debug("Adding lock component to lock request {} ", comp);
        lockComponents.add(comp);
    }
    // For each source to write to, get the appropriate lock type. If it's
    // an OVERWRITE, we need to get an exclusive lock. If it's an insert (no
    // overwrite) then we need a shared lock. If it's an update or delete, we
    // need a SHARED_WRITE.
    for (WriteEntity output : outputs) {
        LOG.debug("output is null " + (output == null));
        if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils.needsLock(output)) {
            // We don't lock files or directories. We also skip locking temp tables.
            continue;
        }
        LockComponentBuilder compBuilder = new LockComponentBuilder();
        Table t = null;
        /*
         * For any insert/update, set the dir cache to read-only mode, where it
         * won't add any new entries to the cache.
         * When updates are executed, delta folders are created only at the end of
         * the statement, so at the time of acquiring locks there are no delta
         * folders yet. This can cause wrong data to be reported when an "insert"
         * is followed by an "update" statement. In such cases, use the cache in
         * read-only mode.
         */
        HiveConf.setIntVar(conf, ConfVars.HIVE_TXN_ACID_DIR_CACHE_DURATION, 0);
        switch(output.getType()) {
            case DATABASE:
                compBuilder.setDbName(output.getDatabase().getName());
                break;
            case TABLE:
            // in case of dynamic partitioning lock the table
            case DUMMYPARTITION:
                t = output.getTable();
                compBuilder.setDbName(t.getDbName());
                compBuilder.setTableName(t.getTableName());
                break;
            case PARTITION:
                compBuilder.setPartitionName(output.getPartition().getName());
                t = output.getPartition().getTable();
                compBuilder.setDbName(t.getDbName());
                compBuilder.setTableName(t.getTableName());
                break;
            default:
                // This is a file or something we don't hold locks for.
                continue;
        }
        switch(output.getWriteType()) {
            /* Base this on HiveOperation instead? This and DDL_NO_LOCK are peppered all
               over the code... Seems much cleaner if each stmt is identified as a
               particular HiveOperation (which I'd think makes sense everywhere). This
               however would be problematic for merge... */
            case DDL_EXCLUSIVE:
                compBuilder.setExclusive();
                compBuilder.setOperationType(DataOperationType.NO_TXN);
                break;
            case DDL_EXCL_WRITE:
                compBuilder.setExclWrite();
                compBuilder.setOperationType(DataOperationType.NO_TXN);
                break;
            case INSERT_OVERWRITE:
                t = AcidUtils.getTable(output);
                if (AcidUtils.isTransactionalTable(t)) {
                    if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK) && !sharedWrite) {
                        compBuilder.setExclusive();
                    } else {
                        compBuilder.setExclWrite();
                    }
                    compBuilder.setOperationType(DataOperationType.UPDATE);
                } else {
                    compBuilder.setExclusive();
                    compBuilder.setOperationType(DataOperationType.NO_TXN);
                }
                break;
            case INSERT:
                assert t != null;
                if (AcidUtils.isTransactionalTable(t)) {
                    boolean isExclMergeInsert = conf.getBoolVar(ConfVars.TXN_MERGE_INSERT_X_LOCK) && isMerge;
                    if (sharedWrite) {
                        compBuilder.setSharedWrite();
                    } else {
                        if (isExclMergeInsert) {
                            compBuilder.setExclWrite();
                        } else {
                            compBuilder.setSharedRead();
                        }
                    }
                    if (isExclMergeInsert) {
                        compBuilder.setOperationType(DataOperationType.UPDATE);
                        break;
                    }
                } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
                    final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(), "Thought all the non native tables have an instance of storage handler");
                    LockType lockType = storageHandler.getLockType(output);
                    if (null == LockType.findByValue(lockType.getValue())) {
                        throw new IllegalArgumentException(String.format("Lock type [%s] for Database.Table [%s.%s] is unknown", lockType, t.getDbName(), t.getTableName()));
                    }
                    compBuilder.setLock(lockType);
                } else {
                    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
                        compBuilder.setExclusive();
                    } else {
                        // this is backward compatible for non-ACID resources, w/o ACID semantics
                        compBuilder.setSharedRead();
                    }
                }
                compBuilder.setOperationType(DataOperationType.INSERT);
                break;
            case DDL_SHARED:
                compBuilder.setSharedRead();
                if (output.isTxnAnalyze()) {
                    // ANALYZE needs txn components to be present; otherwise an aborted
                    // analyze write id might be rolled under the watermark by the
                    // compactor while the stats written by it are still present.
                    continue;
                }
                compBuilder.setOperationType(DataOperationType.NO_TXN);
                break;
            case UPDATE:
            case DELETE:
                assert t != null;
                if (AcidUtils.isTransactionalTable(t) && sharedWrite) {
                    compBuilder.setSharedWrite();
                } else {
                    compBuilder.setExclWrite();
                }
                compBuilder.setOperationType(DataOperationType.valueOf(output.getWriteType().name()));
                break;
            case DDL_NO_LOCK:
                // No lock required here
                continue;
            default:
                throw new RuntimeException("Unknown write type " + output.getWriteType().toString());
        }
        if (t != null) {
            compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
        }
        compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
        LockComponent comp = compBuilder.build();
        LOG.debug("Adding lock component to lock request " + comp.toString());
        lockComponents.add(comp);
    }
    return lockComponents;
}
Also used : OrcFile(org.apache.hadoop.hive.ql.io.orc.OrcFile) OrcRecordUpdater(org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater) Arrays(java.util.Arrays) ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList) LockComponentBuilder(org.apache.hadoop.hive.metastore.LockComponentBuilder) FileSystem(org.apache.hadoop.fs.FileSystem) URISyntaxException(java.net.URISyntaxException) ConfVars(org.apache.hadoop.hive.conf.HiveConf.ConfVars) LoggerFactory(org.slf4j.LoggerFactory) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) FileStatus(org.apache.hadoop.fs.FileStatus) TableScanDesc(org.apache.hadoop.hive.ql.plan.TableScanDesc) Writer(org.apache.hadoop.hive.ql.io.orc.Writer) OrcAcidUtils(org.apache.orc.impl.OrcAcidUtils) Matcher(java.util.regex.Matcher) Pair(org.apache.commons.lang3.tuple.Pair) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) Path(org.apache.hadoop.fs.Path) LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) Context(org.apache.hadoop.hive.ql.Context) Reader(org.apache.hadoop.hive.ql.io.orc.Reader) URI(java.net.URI) ValidReaderWriteIdList(org.apache.hadoop.hive.common.ValidReaderWriteIdList) HiveStorageHandler(org.apache.hadoop.hive.ql.metadata.HiveStorageHandler) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FileFormatException(org.apache.orc.FileFormatException) MetaStoreUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils) HdfsFileStatusWithoutId(org.apache.hadoop.hive.ql.io.HdfsUtils.HdfsFileStatusWithoutId) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) HadoopShims(org.apache.hadoop.hive.shims.HadoopShims) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Set(java.util.Set) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) SessionState(org.apache.hadoop.hive.ql.session.SessionState) Collectors(java.util.stream.Collectors) FileNotFoundException(java.io.FileNotFoundException) Serializable(java.io.Serializable) CreateTableDesc(org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc) ValidReadTxnList(org.apache.hadoop.hive.common.ValidReadTxnList) List(java.util.List) Stream(java.util.stream.Stream) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) Pattern(java.util.regex.Pattern) CacheBuilder(com.google.common.cache.CacheBuilder) RemoteIterator(org.apache.hadoop.fs.RemoteIterator) ErrorMsg(org.apache.hadoop.hive.ql.ErrorMsg) LockType(org.apache.hadoop.hive.metastore.api.LockType) HdfsFileStatusWithId(org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) PathFilter(org.apache.hadoop.fs.PathFilter) HashMap(java.util.HashMap) Supplier(java.util.function.Supplier) ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) TransactionalValidationListener(org.apache.hadoop.hive.metastore.TransactionalValidationListener) DataOperationType(org.apache.hadoop.hive.metastore.api.DataOperationType) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) Strings(com.google.common.base.Strings) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Charset(java.nio.charset.Charset) CreateMaterializedViewDesc(org.apache.hadoop.hive.ql.ddl.view.create.CreateMaterializedViewDesc) LoadSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer) TableName(org.apache.hadoop.hive.common.TableName) 
Entity(org.apache.hadoop.hive.ql.hooks.Entity) DeltaFileMetaData(org.apache.hadoop.hive.ql.io.AcidInputFormat.DeltaFileMetaData) Ref(org.apache.hive.common.util.Ref) Properties(java.util.Properties) Logger(org.slf4j.Logger) LongStream(java.util.stream.LongStream) AcidMetaDataFile(org.apache.hadoop.hive.common.AcidMetaDataFile) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HiveParser(org.apache.hadoop.hive.ql.parse.HiveParser) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) IOException(java.io.IOException) Table(org.apache.hadoop.hive.ql.metadata.Table) AcidConstants(org.apache.hadoop.hive.common.AcidConstants) ASTSearcher(org.apache.hadoop.hive.ql.parse.CalcitePlanner.ASTSearcher) TimeUnit(java.util.concurrent.TimeUnit) Partition(org.apache.hadoop.hive.ql.metadata.Partition) COPY_KEYWORD(org.apache.hadoop.hive.ql.exec.Utilities.COPY_KEYWORD) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ShimLoader(org.apache.hadoop.hive.shims.ShimLoader) Preconditions(com.google.common.base.Preconditions) TxnType(org.apache.hadoop.hive.metastore.api.TxnType) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Cache(com.google.common.cache.Cache) Collections(java.util.Collections) org.apache.hadoop.hive.metastore.api.hive_metastoreConstants(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants) Immutable(javax.annotation.concurrent.Immutable) InputStream(java.io.InputStream)
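To make the write-side branching above easier to follow, here is a condensed, self-contained sketch of the lock-selection matrix. The enums and the chooser class are hypothetical stand-ins for Hive's WriteEntity.WriteType, LockComponentBuilder, and config flags; the real method also handles non-native tables, strict locking for non-ACID tables, and the DDL_SHARED/DDL_NO_LOCK cases:

// Hypothetical stand-ins, reduced to the cases handled above.
enum WriteType { DDL_EXCLUSIVE, DDL_EXCL_WRITE, INSERT_OVERWRITE, INSERT, UPDATE, DELETE }
enum LockKind { EXCLUSIVE, EXCL_WRITE, SHARED_WRITE, SHARED_READ }

final class WriteLockChooser {
    // Mirrors the decision order of makeLockComponents for transactional
    // tables. sharedWrite corresponds to !TXN_WRITE_X_LOCK, overwriteXLock
    // to TXN_OVERWRITE_X_LOCK, exclMergeInsert to TXN_MERGE_INSERT_X_LOCK
    // combined with a MERGE operation.
    static LockKind choose(WriteType writeType, boolean transactional,
                           boolean sharedWrite, boolean overwriteXLock,
                           boolean exclMergeInsert) {
        switch (writeType) {
            case DDL_EXCLUSIVE:
                return LockKind.EXCLUSIVE;
            case DDL_EXCL_WRITE:
                return LockKind.EXCL_WRITE;
            case INSERT_OVERWRITE:
                if (!transactional) {
                    return LockKind.EXCLUSIVE;
                }
                return (overwriteXLock && !sharedWrite) ? LockKind.EXCLUSIVE : LockKind.EXCL_WRITE;
            case INSERT:
                if (!transactional) {
                    // non-native and strict-locking cases omitted here
                    return LockKind.SHARED_READ;
                }
                if (sharedWrite) {
                    return LockKind.SHARED_WRITE;
                }
                return exclMergeInsert ? LockKind.EXCL_WRITE : LockKind.SHARED_READ;
            case UPDATE:
            case DELETE:
                return (transactional && sharedWrite) ? LockKind.SHARED_WRITE : LockKind.EXCL_WRITE;
            default:
                throw new IllegalArgumentException("Unknown write type " + writeType);
        }
    }
}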

Example 43 with Cache

use of com.google.common.cache.Cache in project apollo by ctripcorp.

In the class ConfigFileControllerTest, the method testHandleMessage verifies that a release message invalidates the local cache and clears both watch-key indexes:

@Test
public void testHandleMessage() throws Exception {
    String someWatchKey = "someWatchKey";
    String anotherWatchKey = "anotherWatchKey";
    String someCacheKey = "someCacheKey";
    String anotherCacheKey = "anotherCacheKey";
    String someValue = "someValue";
    ReleaseMessage someReleaseMessage = mock(ReleaseMessage.class);
    when(someReleaseMessage.getMessage()).thenReturn(someWatchKey);
    // Fetch the controller's private Guava cache via Spring's ReflectionTestUtils.
    @SuppressWarnings("unchecked")
    Cache<String, String> cache = (Cache<String, String>) ReflectionTestUtils.getField(configFileController, "localCache");
    cache.put(someCacheKey, someValue);
    cache.put(anotherCacheKey, someValue);
    // Register both cache keys under both watch keys, plus the reverse index.
    watchedKeys2CacheKey.putAll(someWatchKey, Lists.newArrayList(someCacheKey, anotherCacheKey));
    watchedKeys2CacheKey.putAll(anotherWatchKey, Lists.newArrayList(someCacheKey, anotherCacheKey));
    cacheKey2WatchedKeys.putAll(someCacheKey, Lists.newArrayList(someWatchKey, anotherWatchKey));
    cacheKey2WatchedKeys.putAll(anotherCacheKey, Lists.newArrayList(someWatchKey, anotherWatchKey));
    // A single release message should invalidate the cached entries and clear both indexes.
    configFileController.handleMessage(someReleaseMessage, Topics.APOLLO_RELEASE_TOPIC);
    assertTrue(watchedKeys2CacheKey.isEmpty());
    assertTrue(cacheKey2WatchedKeys.isEmpty());
}
Also used : ReleaseMessage(com.ctrip.framework.apollo.biz.entity.ReleaseMessage) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Cache(com.google.common.cache.Cache) Test(org.junit.Test)
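The assertions at the end depend on the controller tearing down both indexes when a release message arrives. A minimal sketch of that invalidation pattern with a Guava Cache and two Multimaps; handleMessage here is a simplified stand-in, not Apollo's actual implementation:

import java.util.Collection;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

final class WatchKeyedCacheSketch {
    private final Cache<String, String> localCache = CacheBuilder.newBuilder().build();
    private final Multimap<String, String> watchedKeys2CacheKey = HashMultimap.create();
    private final Multimap<String, String> cacheKey2WatchedKeys = HashMultimap.create();

    void handleMessage(String watchKey) {
        // Drop every cache entry registered under this watch key.
        Collection<String> cacheKeys = watchedKeys2CacheKey.removeAll(watchKey);
        localCache.invalidateAll(cacheKeys);
        for (String cacheKey : cacheKeys) {
            // Remove the invalidated cache key from every other watch key's
            // index so both maps stay consistent; after one message for a
            // fully shared key set, both maps end up empty, as the test expects.
            for (String otherWatchKey : cacheKey2WatchedKeys.removeAll(cacheKey)) {
                watchedKeys2CacheKey.remove(otherWatchKey, cacheKey);
            }
        }
    }
}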

Aggregations

Types most often used alongside Cache across the 43 indexed examples (usage counts):

Cache (com.google.common.cache.Cache): 43
List (java.util.List): 21
CacheBuilder (com.google.common.cache.CacheBuilder): 19
Set (java.util.Set): 11
Collectors (java.util.stream.Collectors): 11
ArrayList (java.util.ArrayList): 10
Optional (java.util.Optional): 10
TimeUnit (java.util.concurrent.TimeUnit): 10
MILLISECONDS (java.util.concurrent.TimeUnit.MILLISECONDS): 10
Module (com.google.inject.Module): 9
CachingOrcFileTailSource (com.facebook.presto.orc.cache.CachingOrcFileTailSource): 8
OrcFileTailSource (com.facebook.presto.orc.cache.OrcFileTailSource): 8
StorageOrcFileTailSource (com.facebook.presto.orc.cache.StorageOrcFileTailSource): 8
OrcFileTail (com.facebook.presto.orc.metadata.OrcFileTail): 8
ExecutorService (java.util.concurrent.ExecutorService): 8
ConfigBinder.configBinder (com.facebook.airlift.configuration.ConfigBinder.configBinder): 7
CachingStripeMetadataSource (com.facebook.presto.orc.CachingStripeMetadataSource): 7
DwrfAwareStripeMetadataSourceFactory (com.facebook.presto.orc.DwrfAwareStripeMetadataSourceFactory): 7
OrcDataSourceId (com.facebook.presto.orc.OrcDataSourceId): 7
StorageStripeMetadataSource (com.facebook.presto.orc.StorageStripeMetadataSource): 7