
Example 46 with Transaction

Use of org.apache.tephra.Transaction in project cdap by caskdata.

The class MapReduceTaskContextProvider, method createCacheLoader.

/**
 * Creates a {@link CacheLoader} for the task context cache.
 */
private CacheLoader<ContextCacheKey, BasicMapReduceTaskContext> createCacheLoader(final Injector injector) {
    final DiscoveryServiceClient discoveryServiceClient = injector.getInstance(DiscoveryServiceClient.class);
    final DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    final SecureStore secureStore = injector.getInstance(SecureStore.class);
    final SecureStoreManager secureStoreManager = injector.getInstance(SecureStoreManager.class);
    final MessagingService messagingService = injector.getInstance(MessagingService.class);
    // Multiple instances of BasicMapReduceTaskContext can share the same program.
    final AtomicReference<Program> programRef = new AtomicReference<>();
    return new CacheLoader<ContextCacheKey, BasicMapReduceTaskContext>() {

        @Override
        public BasicMapReduceTaskContext load(ContextCacheKey key) throws Exception {
            TaskAttemptID taskAttemptId = key.getTaskAttemptID();
            // taskAttemptId can be null when this is called from an org.apache.hadoop.mapreduce.Partitioner or
            // an org.apache.hadoop.io.RawComparator, in which case the JobId is read from the conf instead. Note that
            // the JobId isn't in the conf for the OutputCommitter#setupJob method, where the taskAttemptId is used.
            Path txFile = MainOutputCommitter.getTxFile(key.getConfiguration(), taskAttemptId != null ? taskAttemptId.getJobID() : null);
            FileSystem fs = txFile.getFileSystem(key.getConfiguration());
            Preconditions.checkArgument(fs.exists(txFile));
            Transaction tx;
            try (FSDataInputStream txFileInputStream = fs.open(txFile)) {
                byte[] txByteArray = ByteStreams.toByteArray(txFileInputStream);
                tx = new TransactionCodec().decode(txByteArray);
            }
            MapReduceContextConfig contextConfig = new MapReduceContextConfig(key.getConfiguration());
            MapReduceClassLoader classLoader = MapReduceClassLoader.getFromConfiguration(key.getConfiguration());
            Program program = programRef.get();
            if (program == null) {
                // Program creation is relatively cheap, so just create one and do a compare-and-set.
                programRef.compareAndSet(null, createProgram(contextConfig, classLoader.getProgramClassLoader()));
                program = programRef.get();
            }
            WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
            DatasetFramework programDatasetFramework = workflowInfo == null ? datasetFramework : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, program.getApplicationSpecification());
            // Setup dataset framework context, if required
            if (programDatasetFramework instanceof ProgramContextAware) {
                ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
                ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
            }
            MapReduceSpecification spec = program.getApplicationSpecification().getMapReduce().get(program.getName());
            MetricsCollectionService metricsCollectionService = null;
            MapReduceMetrics.TaskType taskType = null;
            String taskId = null;
            ProgramOptions options = contextConfig.getProgramOptions();
            // taskAttemptId can be null here (see the Partitioner / RawComparator note above)
            if (taskAttemptId != null) {
                taskId = taskAttemptId.getTaskID().toString();
                if (MapReduceMetrics.TaskType.hasType(taskAttemptId.getTaskType())) {
                    taskType = MapReduceMetrics.TaskType.from(taskAttemptId.getTaskType());
                    // the metrics collection service is only needed when the task is a mapper or a reducer
                    metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
                    options = new SimpleProgramOptions(
                        options.getProgramId(), options.getArguments(),
                        new BasicArguments(RuntimeArguments.extractScope(
                            "task", taskType.toString().toLowerCase(),
                            contextConfig.getProgramOptions().getUserArguments().asMap())),
                        options.isDebug());
                }
            }
            CConfiguration cConf = injector.getInstance(CConfiguration.class);
            TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
            return new BasicMapReduceTaskContext(
                program, options, cConf, taskType, taskId, spec, workflowInfo,
                discoveryServiceClient, metricsCollectionService, txClient, tx,
                programDatasetFramework, classLoader.getPluginInstantiator(),
                contextConfig.getLocalizedResources(), secureStore, secureStoreManager,
                authorizationEnforcer, authenticationContext, messagingService, mapReduceClassLoader);
        }
    };
}
Also used : DiscoveryServiceClient(org.apache.twill.discovery.DiscoveryServiceClient) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) NameMappedDatasetFramework(co.cask.cdap.internal.app.runtime.workflow.NameMappedDatasetFramework) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) FileSystem(org.apache.hadoop.fs.FileSystem) SecureStoreManager(co.cask.cdap.api.security.store.SecureStoreManager) BasicArguments(co.cask.cdap.internal.app.runtime.BasicArguments) MapReduceMetrics(co.cask.cdap.app.metrics.MapReduceMetrics) Path(org.apache.hadoop.fs.Path) Program(co.cask.cdap.app.program.Program) DefaultProgram(co.cask.cdap.app.program.DefaultProgram) MetricsCollectionService(co.cask.cdap.api.metrics.MetricsCollectionService) MapReduceSpecification(co.cask.cdap.api.mapreduce.MapReduceSpecification) AtomicReference(java.util.concurrent.atomic.AtomicReference) BasicProgramContext(co.cask.cdap.internal.app.runtime.BasicProgramContext) SecureStore(co.cask.cdap.api.security.store.SecureStore) CConfiguration(co.cask.cdap.common.conf.CConfiguration) SimpleProgramOptions(co.cask.cdap.internal.app.runtime.SimpleProgramOptions) ProgramOptions(co.cask.cdap.app.runtime.ProgramOptions) MessagingService(co.cask.cdap.messaging.MessagingService) Transaction(org.apache.tephra.Transaction) WorkflowProgramInfo(co.cask.cdap.internal.app.runtime.workflow.WorkflowProgramInfo) TransactionCodec(org.apache.tephra.TransactionCodec) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) CacheLoader(com.google.common.cache.CacheLoader) ProgramRunId(co.cask.cdap.proto.id.ProgramRunId) SimpleProgramOptions(co.cask.cdap.internal.app.runtime.SimpleProgramOptions) ProgramContextAware(co.cask.cdap.data.ProgramContextAware)
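
For context, the loader above only reads the transaction file; writing it is the committer's job. Below is a minimal sketch of that write side, assuming the same getTxFile path convention and an already-started transaction. writeTxFile is a hypothetical helper for illustration, not CDAP's actual MainOutputCommitter code.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionCodec;

// Hypothetical helper mirroring what the output committer is assumed to do:
// serialize the job's Transaction with Tephra's codec and publish it where
// every task attempt can later open and decode it.
static void writeTxFile(FileSystem fs, Path txFile, Transaction tx) throws IOException {
    byte[] encoded = new TransactionCodec().encode(tx);
    try (FSDataOutputStream out = fs.create(txFile, true)) {
        out.write(encoded);
    }
}

This ordering is what lets the loader simply check Preconditions.checkArgument(fs.exists(txFile)): by the time any task context is created, the snapshot has already been published.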

Example 47 with Transaction

Use of org.apache.tephra.Transaction in project phoenix by apache.

The class TephraTransactionContext, method getVisibilityLevel.

@Override
public PhoenixVisibilityLevel getVisibilityLevel() {
    Transaction tx = getCurrentTransaction();
    assert (tx != null);
    VisibilityLevel visibilityLevel = tx.getVisibilityLevel();
    PhoenixVisibilityLevel phoenixVisibilityLevel;
    switch(visibilityLevel) {
        case SNAPSHOT:
            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT;
            break;
        case SNAPSHOT_EXCLUDE_CURRENT:
            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
            break;
        case SNAPSHOT_ALL:
            phoenixVisibilityLevel = PhoenixVisibilityLevel.SNAPSHOT_ALL;
            break;
        default:
            phoenixVisibilityLevel = null;
    }
    return phoenixVisibilityLevel;
}
Also used : Transaction(org.apache.tephra.Transaction) VisibilityLevel(org.apache.tephra.Transaction.VisibilityLevel)

Example 48 with Transaction

Use of org.apache.tephra.Transaction in project phoenix by apache.

The class TephraTransactionContext, method setVisibilityLevel.

@Override
public void setVisibilityLevel(PhoenixVisibilityLevel visibilityLevel) {
    VisibilityLevel tephraVisibilityLevel = null;
    switch(visibilityLevel) {
        case SNAPSHOT:
            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT;
            break;
        case SNAPSHOT_EXCLUDE_CURRENT:
            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT;
            break;
        case SNAPSHOT_ALL:
            tephraVisibilityLevel = VisibilityLevel.SNAPSHOT_ALL;
            break;
        default:
            assert (false);
    }
    Transaction tx = getCurrentTransaction();
    assert (tx != null);
    tx.setVisibility(tephraVisibilityLevel);
}
Also used : Transaction(org.apache.tephra.Transaction) VisibilityLevel(org.apache.tephra.Transaction.VisibilityLevel)
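
Since the Phoenix and Tephra enum constants in Examples 47 and 48 happen to share names, the mapping could in principle collapse to a single call; a hedged sketch, assuming the two enums stay name-compatible (which the shipped switches do not rely on):

// Works only while PhoenixVisibilityLevel and Transaction.VisibilityLevel
// keep identical constant names; valueOf throws IllegalArgumentException
// the moment they drift apart.
VisibilityLevel tephraVisibilityLevel = VisibilityLevel.valueOf(visibilityLevel.name());

The explicit switch Phoenix actually uses is more verbose but makes each case a deliberate decision, a reasonable trade-off for a correctness-sensitive mapping like snapshot visibility.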

Example 49 with Transaction

Use of org.apache.tephra.Transaction in project cdap by caskdata.

The class TransactionHttpHandler, method invalidList.

@Path("/transactions/invalid")
@GET
public void invalidList(HttpRequest request, HttpResponder responder, @QueryParam("limit") @DefaultValue("-1") int limit) {
    Transaction tx = txClient.startShort();
    txClient.abort(tx);
    long[] invalids = tx.getInvalids();
    // a negative limit (the default) means no limit; this also avoids a
    // NegativeArraySizeException from Arrays.copyOf for other negative values
    if (limit < 0) {
        responder.sendJson(HttpResponseStatus.OK, GSON.toJson(invalids));
        return;
    }
    responder.sendJson(HttpResponseStatus.OK, GSON.toJson(Arrays.copyOf(invalids, Math.min(limit, invalids.length))));
}
Also used : Transaction(org.apache.tephra.Transaction) Path(javax.ws.rs.Path) GET(javax.ws.rs.GET)
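
If a caller only needs the size of the invalid list rather than the ids themselves, Tephra's TransactionSystemClient exposes that directly, so the throwaway transaction can be skipped; a minimal sketch, assuming the same injected txClient:

// Counting invalid transactions without starting and aborting a
// transaction just to snapshot the list.
int invalidCount = txClient.getInvalidSize();

The start-then-abort pattern in the handler remains the right choice when the actual ids are needed, since a freshly started short transaction carries a consistent snapshot of the invalid list.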

Example 50 with Transaction

Use of org.apache.tephra.Transaction in project phoenix by apache.

The class PhoenixIndexMetaData, method getIndexMetaData.

private static IndexMetaDataCache getIndexMetaData(RegionCoprocessorEnvironment env, Map<String, byte[]> attributes) throws IOException {
    if (attributes == null) {
        return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE;
    }
    byte[] uuid = attributes.get(PhoenixIndexCodec.INDEX_UUID);
    if (uuid == null) {
        return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE;
    }
    byte[] md = attributes.get(PhoenixIndexCodec.INDEX_PROTO_MD);
    boolean useProto = md != null;
    if (md == null) {
        md = attributes.get(PhoenixIndexCodec.INDEX_MD);
    }
    byte[] txState = attributes.get(BaseScannerRegionObserver.TX_STATE);
    if (md != null) {
        final List<IndexMaintainer> indexMaintainers = IndexMaintainer.deserialize(md, useProto);
        final Transaction txn = MutationState.decodeTransaction(txState);
        return new IndexMetaDataCache() {

            @Override
            public void close() throws IOException {
            }

            @Override
            public List<IndexMaintainer> getIndexMaintainers() {
                return indexMaintainers;
            }

            @Override
            public Transaction getTransaction() {
                return txn;
            }
        };
    } else {
        byte[] tenantIdBytes = attributes.get(PhoenixRuntime.TENANT_ID_ATTRIB);
        ImmutableBytesPtr tenantId = tenantIdBytes == null ? null : new ImmutableBytesPtr(tenantIdBytes);
        TenantCache cache = GlobalCache.getTenantCache(env, tenantId);
        IndexMetaDataCache indexCache = (IndexMetaDataCache) cache.getServerCache(new ImmutableBytesPtr(uuid));
        if (indexCache == null) {
            String msg = "key=" + ServerCacheClient.idToString(uuid) + " region=" + env.getRegion() + " host=" + env.getRegionServerServices().getServerName();
            SQLException e = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_METADATA_NOT_FOUND).setMessage(msg).build().buildException();
            // will not return
            ServerUtil.throwIOException("Index update failed", e);
        }
        return indexCache;
    }
}
Also used : IndexMetaDataCache(org.apache.phoenix.cache.IndexMetaDataCache) TenantCache(org.apache.phoenix.cache.TenantCache) Transaction(org.apache.tephra.Transaction) SQLException(java.sql.SQLException) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
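
For context, MutationState.decodeTransaction is assumed here to be a thin guard around Tephra's codec that tolerates the non-transactional case; a sketch of that shape, not the verbatim Phoenix implementation:

import java.io.IOException;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionCodec;

// Hedged sketch: a missing or empty tx state means the mutation is not
// transactional, so there is no Transaction to expose; otherwise decode
// the serialized snapshot that the client attached to the operation.
static Transaction decodeTransaction(byte[] txState) throws IOException {
    if (txState == null || txState.length == 0) {
        return null;
    }
    return new TransactionCodec().decode(txState);
}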

Aggregations

Transaction (org.apache.tephra.Transaction)99 Test (org.junit.Test)54 TransactionAware (org.apache.tephra.TransactionAware)34 Table (co.cask.cdap.api.dataset.table.Table)29 DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin)27 HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable)22 Put (co.cask.cdap.api.dataset.table.Put)12 DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties)11 Get (co.cask.cdap.api.dataset.table.Get)10 TransactionSystemClient (org.apache.tephra.TransactionSystemClient)10 Row (co.cask.cdap.api.dataset.table.Row)8 ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig)8 KeyStructValueTableDefinition (co.cask.cdap.explore.service.datasets.KeyStructValueTableDefinition)8 Scan (co.cask.cdap.api.dataset.table.Scan)7 ArrayList (java.util.ArrayList)7 CConfiguration (co.cask.cdap.common.conf.CConfiguration)6 ExploreExecutionResult (co.cask.cdap.explore.client.ExploreExecutionResult)6 DatasetId (co.cask.cdap.proto.id.DatasetId)6 IOException (java.io.IOException)6 BufferingTableTest (co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest)5