
Example 31 with TransactionFailureException

use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.

the class DatasetBasedStreamSizeScheduleStore method list.

/**
   * @return a list of all the schedules and their states present in the store
   */
public synchronized List<StreamSizeScheduleState> list() throws InterruptedException, TransactionFailureException {
    final List<StreamSizeScheduleState> scheduleStates = Lists.newArrayList();
    factory.createExecutor(ImmutableList.of((TransactionAware) table)).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            try (Scanner scan = getScannerWithPrefix(table, KEY_PREFIX)) {
                Row row;
                while ((row = scan.next()) != null) {
                    byte[] scheduleBytes = row.get(SCHEDULE_COL);
                    byte[] baseSizeBytes = row.get(BASE_SIZE_COL);
                    byte[] baseTsBytes = row.get(BASE_TS_COL);
                    byte[] lastRunSizeBytes = row.get(LAST_RUN_SIZE_COL);
                    byte[] lastRunTsBytes = row.get(LAST_RUN_TS_COL);
                    byte[] activeBytes = row.get(ACTIVE_COL);
                    byte[] propertyBytes = row.get(PROPERTIES_COL);
                    if (isInvalidRow(row)) {
                        LIMITED_LOG.debug("Stream size schedule entry with row key {} does not have all columns.", Bytes.toString(row.getRow()));
                        continue;
                    }
                    String rowKey = Bytes.toString(row.getRow());
                    String[] splits = rowKey.split(":");
                    ProgramId program;
                    if (splits.length == 7) {
                        // New row key format (with application version) -
                        // streamSizeSchedule:namespace:application:version:type:program:schedule
                        program = new ApplicationId(splits[1], splits[2], splits[3]).program(ProgramType.valueOf(splits[4]), splits[5]);
                    } else if (splits.length == 6) {
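                        // Old row key format (without application version) -
                        // streamSizeSchedule:namespace:application:type:program:schedule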
                        program = new ApplicationId(splits[1], splits[2]).program(ProgramType.valueOf(splits[3]), splits[4]);
                    } else {
                        continue;
                    }
                    SchedulableProgramType programType = program.getType().getSchedulableType();
                    StreamSizeSchedule schedule = GSON.fromJson(Bytes.toString(scheduleBytes), StreamSizeSchedule.class);
                    long baseSize = Bytes.toLong(baseSizeBytes);
                    long baseTs = Bytes.toLong(baseTsBytes);
                    long lastRunSize = Bytes.toLong(lastRunSizeBytes);
                    long lastRunTs = Bytes.toLong(lastRunTsBytes);
                    boolean active = Bytes.toBoolean(activeBytes);
                    Map<String, String> properties = Maps.newHashMap();
                    if (propertyBytes != null) {
                        properties = GSON.fromJson(Bytes.toString(propertyBytes), STRING_MAP_TYPE);
                    }
                    StreamSizeScheduleState scheduleState = new StreamSizeScheduleState(program, programType, schedule, properties, baseSize, baseTs, lastRunSize, lastRunTs, active);
                    scheduleStates.add(scheduleState);
                    LOG.debug("StreamSizeSchedule found in store: {}", scheduleState);
                }
            }
        }
    });
    return scheduleStates;
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) TransactionExecutor(org.apache.tephra.TransactionExecutor) ProgramId(co.cask.cdap.proto.id.ProgramId) TransactionFailureException(org.apache.tephra.TransactionFailureException) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) TransactionConflictException(org.apache.tephra.TransactionConflictException) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) IOException(java.io.IOException) StreamSizeScheduleState(co.cask.cdap.internal.app.runtime.schedule.StreamSizeScheduleState) SchedulableProgramType(co.cask.cdap.api.schedule.SchedulableProgramType) Row(co.cask.cdap.api.dataset.table.Row) ApplicationId(co.cask.cdap.proto.id.ApplicationId) StreamSizeSchedule(co.cask.cdap.internal.schedule.StreamSizeSchedule) Map(java.util.Map)
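
A hedged caller sketch for the method above; it is not from the CDAP sources. The store reference, LOG, and the surrounding class are assumed, and only list()'s signature and exceptions are taken from the example.

// Hypothetical caller of list(); 'store' and LOG are assumed to exist.
public int countSchedules(DatasetBasedStreamSizeScheduleStore store) throws InterruptedException {
    try {
        List<StreamSizeScheduleState> states = store.list();
        LOG.info("Found {} stream size schedules in the store", states.size());
        return states.size();
    } catch (TransactionFailureException e) {
        // The read transaction failed; callers would typically retry or propagate
        LOG.warn("Failed to list stream size schedules", e);
        return 0;
    }
}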

Example 32 with TransactionFailureException

use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.

the class DatasetBasedStreamSizeScheduleStore method upgrade.

/**
   * Adds the application version to StreamSizeSchedule row keys in the scheduler store.
   *
   * @throws InterruptedException
   * @throws IOException
   * @throws DatasetManagementException
   */
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
    // Wait until the store is initialized
    // Use a new instance of table since Table is not thread safe
    Table metaTable = null;
    while (metaTable == null) {
        try {
            metaTable = tableUtil.getMetaTable();
        } catch (Exception e) {
            // Meta table not available yet; wait and retry
            TimeUnit.SECONDS.sleep(10);
        }
    }
    if (isUpgradeComplete()) {
        LOG.info("{} is already upgraded.", NAME);
        return;
    }
    final AtomicInteger maxNumberUpdateRows = new AtomicInteger(1000);
    final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
    LOG.info("Starting upgrade of {}.", NAME);
    while (true) {
        sleepTimeInSecs.set(60);
        try {
            if (executeUpgradeInTransaction(table, maxNumberUpdateRows)) {
                break;
            }
        } catch (TransactionFailureException e) {
            if (e instanceof TransactionConflictException) {
                LOG.debug("Upgrade step faced Transaction Conflict exception. Retrying operation now.", e);
                sleepTimeInSecs.set(10);
            } else if (e instanceof TransactionNotInProgressException) {
                int currMaxRows = maxNumberUpdateRows.get();
                if (currMaxRows > 500) {
                    maxNumberUpdateRows.decrementAndGet();
                } else {
                    LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
                    return;
                }
                sleepTimeInSecs.set(10);
                LOG.debug("Upgrade step faced a Transaction Timeout exception. " + "Current number of max update rows is set to : {} and retrying the operation now.", maxNumberUpdateRows.get(), e);
            } else {
                LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
                sleepTimeInSecs.set(60);
            }
        }
        TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
    }
    LOG.info("Upgrade of {} is complete.", NAME);
}
Also used : TransactionFailureException(org.apache.tephra.TransactionFailureException) Table(co.cask.cdap.api.dataset.table.Table) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TransactionConflictException(org.apache.tephra.TransactionConflictException) TransactionNotInProgressException(org.apache.tephra.TransactionNotInProgressException) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) IOException(java.io.IOException)
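
The retry loop above dispatches on the Tephra exception hierarchy: TransactionConflictException and TransactionNotInProgressException both extend TransactionFailureException, so catch order separates the transient cases from unclassified failures. A minimal standalone sketch of that dispatch (the Step interface and class name are invented for illustration, not CDAP code):

import java.util.concurrent.TimeUnit;
import org.apache.tephra.TransactionConflictException;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionNotInProgressException;

final class RetryDispatchSketch {

    // Hypothetical unit of transactional work.
    interface Step {
        void run() throws TransactionFailureException;
    }

    // Retry until success, pausing briefly after transient failures and longer after unknown ones.
    static void runWithRetry(Step step) throws InterruptedException {
        while (true) {
            long sleepSecs;
            try {
                step.run();
                return;
            } catch (TransactionConflictException e) {
                sleepSecs = 10; // conflict: another transaction won; retry soon
            } catch (TransactionNotInProgressException e) {
                sleepSecs = 10; // timeout: the upgrade above also shrinks its batch here
            } catch (TransactionFailureException e) {
                sleepSecs = 60; // unclassified failure: back off longer
            }
            TimeUnit.SECONDS.sleep(sleepSecs);
        }
    }
}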

Example 33 with TransactionFailureException

use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.

the class SparkTransactionHandlerTest method testFailureTransaction.

/**
 * Tests the case where starting a transaction fails.
 */
@Test
public void testFailureTransaction() throws Exception {
    TransactionManager txManager = new TransactionManager(new Configuration()) {

        @Override
        public Transaction startLong() {
            throw new IllegalStateException("Cannot start long transaction");
        }
    };
    txManager.startAndWait();
    try {
        SparkTransactionHandler txHandler = new SparkTransactionHandler(new InMemoryTxSystemClient(txManager));
        SparkDriverHttpService httpService = new SparkDriverHttpService("test", InetAddress.getLoopbackAddress().getCanonicalHostName(), txHandler);
        httpService.startAndWait();
        try {
            // Start a job
            txHandler.jobStarted(1, ImmutableSet.of(2));
            // Make a call to the stage transaction endpoint, it should throw TransactionFailureException
            try {
                new SparkTransactionClient(httpService.getBaseURI()).getTransaction(2, 1, TimeUnit.SECONDS);
                Assert.fail("Should have failed to get transaction");
            } catch (TransactionFailureException e) {
            // expected
            }
            // End the job
            txHandler.jobEnded(1, false);
        } finally {
            httpService.stopAndWait();
        }
    } finally {
        txManager.stopAndWait();
    }
}
Also used : TransactionFailureException(org.apache.tephra.TransactionFailureException) Configuration(org.apache.hadoop.conf.Configuration) TransactionManager(org.apache.tephra.TransactionManager) InMemoryTxSystemClient(org.apache.tephra.inmemory.InMemoryTxSystemClient) Test(org.junit.Test)
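
On JUnit 4.13 or newer (an assumption; the test above only shows JUnit 4), the try/fail/catch idiom can be collapsed into Assert.assertThrows. A sketch of the equivalent check, reusing the setup from the test:

// Equivalent expected-exception assertion, assuming JUnit 4.13+ on the classpath
SparkTransactionClient client = new SparkTransactionClient(httpService.getBaseURI());
Assert.assertThrows(TransactionFailureException.class,
    () -> client.getTransaction(2, 1, TimeUnit.SECONDS));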

Example 34 with TransactionFailureException

use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.

the class DatasetTypeManager method addModule.

/**
 * Adds a dataset module in a namespace.
 *
 * @param datasetModuleId the {@link DatasetModuleId} to add
 * @param className module class
 * @param jarLocation location of the module jar
 * @param force if true, an update will be allowed even if there are conflicts with other modules, or if
 *                     removal of a type would break other modules' dependencies.
 */
public void addModule(final DatasetModuleId datasetModuleId, final String className, final Location jarLocation, final boolean force) throws DatasetModuleConflictException {
    LOG.debug("adding module: {}, className: {}, jarLocation: {}", datasetModuleId, className, jarLocation == null ? "[local]" : jarLocation);
    try {
        final DatasetTypeMDS datasetTypeMDS = datasetCache.getDataset(DatasetMetaTableUtil.META_TABLE_NAME);
        final DatasetInstanceMDS datasetInstanceMDS = datasetCache.getDataset(DatasetMetaTableUtil.INSTANCE_TABLE_NAME);
        txExecutorFactory.createExecutor(datasetCache).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                // 1. get existing module with all its types
                DatasetModuleMeta existing = datasetTypeMDS.getModule(datasetModuleId);
                DependencyTrackingRegistry reg;
                // 2. unpack jar and create class loader
                File unpackedLocation = Files.createTempDirectory(Files.createDirectories(systemTempPath), datasetModuleId.getEntityName()).toFile();
                DirectoryClassLoader cl = null;
                try {
                    // NOTE: if jarLocation is null, we assume that this is a system module, i.e. always present in the classpath
                    if (jarLocation != null) {
                        BundleJarUtil.unJar(jarLocation, unpackedLocation);
                        cl = new DirectoryClassLoader(unpackedLocation, cConf.get(Constants.AppFabric.PROGRAM_EXTRA_CLASSPATH), FilterClassLoader.create(getClass().getClassLoader()), "lib");
                    }
                    reg = new DependencyTrackingRegistry(datasetModuleId, datasetTypeMDS, cl, force);
                    // 3. register the new module while tracking dependencies.
                    // this will fail if a type exists in a different module
                    DatasetDefinitionRegistries.register(className, cl, reg);
                } catch (TypeConflictException e) {
                    // type conflict from the registry, we want to throw that as is
                    throw e;
                } catch (Exception e) {
                    LOG.error("Could not instantiate instance of dataset module class {} for module {} using jarLocation {}", className, datasetModuleId, jarLocation);
                    throw Throwables.propagate(e);
                } finally {
                    if (cl != null) {
                        // Close the DirectoryClassLoader used to load the module
                        Closeables.closeQuietly(cl);
                    }
                    try {
                        DirUtils.deleteDirectoryContents(unpackedLocation);
                    } catch (IOException e) {
                        LOG.warn("Failed to delete directory {}", unpackedLocation, e);
                    }
                }
                // 4. determine whether any types were removed from the module, and whether any other modules depend on them
                if (existing != null) {
                    Set<String> removedTypes = new HashSet<>(existing.getTypes());
                    removedTypes.removeAll(reg.getTypes());
                    // TODO (CDAP-6294): track dependencies at the type level
                    if (!force && !removedTypes.isEmpty() && !existing.getUsedByModules().isEmpty()) {
                        throw new DatasetModuleConflictException(String.format("Cannot update module '%s' to remove types %s: Modules %s may depend on it. Delete them first", datasetModuleId, removedTypes, existing.getUsedByModules()));
                    }
                    Collection<DatasetSpecification> instances = datasetInstanceMDS.getByTypes(datasetModuleId.getParent(), removedTypes);
                    if (!instances.isEmpty()) {
                        throw new DatasetModuleConflictException(String.format("Attempt to remove dataset types %s from module '%s' that have existing instances: %s. " + "Delete them first.", removedTypes, datasetModuleId, Iterables.toString(Iterables.transform(instances, new Function<DatasetSpecification, String>() {

                            @Nullable
                            @Override
                            public String apply(@Nullable DatasetSpecification input) {
                                return input.getName() + ":" + input.getType();
                            }
                        }))));
                    }
                }
                // NOTE: we use set to avoid duplicated dependencies
                // NOTE: we use LinkedHashSet to preserve order in which dependencies must be loaded
                Set<String> moduleDependencies = new LinkedHashSet<>();
                for (DatasetTypeId usedType : reg.getUsedTypes()) {
                    DatasetModuleMeta usedModule = datasetTypeMDS.getModuleByType(usedType);
                    Preconditions.checkState(usedModule != null, String.format("Found a null used module for type %s while adding module %s", usedType, datasetModuleId));
                    // add the modules the used module depends on, then the used module itself,
                    // preserving the order in which modules must be loaded to instantiate a type
                    moduleDependencies.addAll(usedModule.getUsesModules());
                    boolean added = moduleDependencies.add(usedModule.getName());
                    if (added) {
                        // also adding this module as a dependent for all modules it uses
                        usedModule.addUsedByModule(datasetModuleId.getEntityName());
                        datasetTypeMDS.writeModule(usedType.getParent(), usedModule);
                    }
                }
                URI jarURI = jarLocation == null ? null : jarLocation.toURI();
                DatasetModuleMeta moduleMeta = existing == null ? new DatasetModuleMeta(datasetModuleId.getEntityName(), className, jarURI, reg.getTypes(), Lists.newArrayList(moduleDependencies)) : new DatasetModuleMeta(datasetModuleId.getEntityName(), className, jarURI, reg.getTypes(), Lists.newArrayList(moduleDependencies), Lists.newArrayList(existing.getUsedByModules()));
                datasetTypeMDS.writeModule(datasetModuleId.getParent(), moduleMeta);
            }
        });
    } catch (TransactionFailureException e) {
        Throwable cause = e.getCause();
        if (cause != null) {
            if (cause instanceof DatasetModuleConflictException) {
                throw (DatasetModuleConflictException) cause;
            } else if (cause instanceof TypeConflictException) {
                throw new DatasetModuleConflictException(cause.getMessage(), cause);
            }
        }
        throw Throwables.propagate(e);
    } catch (Exception e) {
        LOG.error("Operation failed", e);
        throw Throwables.propagate(e);
    }
}
Also used : ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) HashSet(java.util.HashSet) LinkedHashSet(java.util.LinkedHashSet) DatasetTypeId(co.cask.cdap.proto.id.DatasetTypeId) URI(java.net.URI) DatasetModuleMeta(co.cask.cdap.proto.DatasetModuleMeta) DirectoryClassLoader(co.cask.cdap.common.lang.DirectoryClassLoader) TypeConflictException(co.cask.cdap.data2.dataset2.TypeConflictException) DatasetInstanceMDS(co.cask.cdap.data2.datafabric.dataset.service.mds.DatasetInstanceMDS) DatasetSpecification(co.cask.cdap.api.dataset.DatasetSpecification) TransactionExecutor(org.apache.tephra.TransactionExecutor) IOException(java.io.IOException) DatasetTypeMDS(co.cask.cdap.data2.datafabric.dataset.service.mds.DatasetTypeMDS) TransactionFailureException(org.apache.tephra.TransactionFailureException) Collection(java.util.Collection) File(java.io.File) Nullable(javax.annotation.Nullable)
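
The catch block above illustrates a common Tephra idiom: a checked exception thrown inside the transactional subroutine surfaces as the cause of the TransactionFailureException and must be unwrapped at the call site. A generic sketch of that unwrapping (the class and method names are invented; Throwables is Guava, as used in the example):

import com.google.common.base.Throwables;
import org.apache.tephra.TransactionFailureException;

final class TxCauseUnwrapSketch {

    // Rethrow the wrapped cause when it is the caller's checked domain exception,
    // as addModule does for DatasetModuleConflictException; otherwise propagate unchecked.
    static <T extends Exception> void rethrowCause(TransactionFailureException e,
                                                   Class<T> domainType) throws T {
        Throwable cause = e.getCause();
        if (cause != null && domainType.isInstance(cause)) {
            throw domainType.cast(cause);
        }
        throw Throwables.propagate(e);
    }
}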

Example 35 with TransactionFailureException

use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.

the class BaseHiveExploreService method closeTransaction.

private void closeTransaction(QueryHandle handle, OperationInfo opInfo) {
    try {
        String txCommitted = opInfo.getSessionConf().get(Constants.Explore.TX_QUERY_CLOSED);
        if (txCommitted != null && Boolean.parseBoolean(txCommitted)) {
            LOG.trace("Transaction for handle {} has already been closed", handle);
            return;
        }
        Transaction tx = ConfigurationUtil.get(opInfo.getSessionConf(), Constants.Explore.TX_QUERY_KEY, TxnCodec.INSTANCE);
        LOG.trace("Closing transaction {} for handle {}", tx, handle);
        if (opInfo.isReadOnly() || (opInfo.getStatus() != null && opInfo.getStatus().getStatus() == QueryStatus.OpStatus.FINISHED)) {
            try {
                txClient.commitOrThrow(tx);
            } catch (TransactionFailureException e) {
                txClient.invalidate(tx.getWritePointer());
                LOG.info("Invalidating transaction: {}", tx);
            }
        } else {
            txClient.invalidate(tx.getWritePointer());
        }
    } catch (Throwable e) {
        LOG.error("Got exception while closing transaction.", e);
    } finally {
        opInfo.getSessionConf().put(Constants.Explore.TX_QUERY_CLOSED, "true");
    }
}
Also used : TransactionFailureException(org.apache.tephra.TransactionFailureException) Transaction(org.apache.tephra.Transaction)
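
The commit-or-invalidate cleanup above is worth isolating: if the commit fails, invalidating the write pointer keeps the failed transaction from lingering in the in-progress set. A minimal sketch using only the client calls that appear in the example (class and method names are illustrative):

import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;

final class CommitOrInvalidateSketch {

    // Commit if possible; on failure, invalidate so the transaction cannot block others.
    static void finish(TransactionSystemClient txClient, Transaction tx) {
        try {
            txClient.commitOrThrow(tx);
        } catch (TransactionFailureException e) {
            txClient.invalidate(tx.getWritePointer());
        }
    }
}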

Aggregations

TransactionFailureException (org.apache.tephra.TransactionFailureException) 55
Test (org.junit.Test) 19
TransactionContext (org.apache.tephra.TransactionContext) 17
IOException (java.io.IOException) 16
TransactionExecutor (org.apache.tephra.TransactionExecutor) 12
TransactionConflictException (org.apache.tephra.TransactionConflictException) 8
TxRunnable (co.cask.cdap.api.TxRunnable) 6
DatasetContext (co.cask.cdap.api.data.DatasetContext) 6
Location (org.apache.twill.filesystem.Location) 6
TransactionAware (org.apache.tephra.TransactionAware) 5
DataSetException (co.cask.cdap.api.dataset.DataSetException) 4
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException) 4
Table (co.cask.cdap.api.dataset.table.Table) 4
ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig) 4
List (java.util.List) 4
Map (java.util.Map) 4
ArrayList (java.util.ArrayList) 3
Collection (java.util.Collection) 3
TimeoutException (java.util.concurrent.TimeoutException) 3
Transaction (org.apache.tephra.Transaction) 3