Search in sources :

Example 1 with REPLACE_EXISTING

use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project disunity by ata4.

The following example is taken from the class BundleUnpack, method runFile.

/**
 * Unpacks the entries of the asset bundle {@code file} into {@code outputDir},
 * inferring the output directory from the bundle path when none was configured,
 * and optionally writing a JSON properties file next to it.
 *
 * IOExceptions are caught and logged as warnings rather than propagated, so a
 * single unreadable bundle does not abort a batch run.
 */
@Override
protected void runFile(Path file) {
    try (BundleReader bundleReader = new BundleReader(file)) {
        Bundle bundle = bundleReader.read();
        // Counter shared with the lambda below to drive the progress indicator.
        AtomicInteger done = new AtomicInteger();
        long total = bundle.entryInfos().size();
        // define output directory, if not yet defined
        if (outputDir == null) {
            // Single-entry bundles unpack next to the bundle file; multi-entry
            // bundles get their own sub-directory named after the bundle.
            if (bundle.entryInfos().size() == 1) {
                outputDir = file.getParent();
                if (outputDir == null) {
                    // Passed a filename only. Use the current directory.
                    outputDir = Paths.get(".");
                }
            } else {
                String fileName = PathUtils.getBaseName(file);
                outputDir = file.resolveSibling(fileName);
            }
        }
        try {
            // If a specific entry name was requested via 'filename', extract only
            // that entry; otherwise extract everything.
            bundle.entries().stream().filter(entry -> filename == null || entry.name().equals(filename)).forEach(uncheck(entry -> {
                progress.update(Optional.of(entry.name()), done.getAndIncrement() / (double) total);
                Path entryFile = outputDir.resolve(entry.name());
                // Entry names may contain sub-directories; create them first.
                Files.createDirectories(entryFile.getParent());
                Files.copy(entry.inputStream(), entryFile, REPLACE_EXISTING);
                if (done.get() == total) {
                    progress.update(Optional.empty(), 1);
                }
            }));
        } catch (UncheckedIOException ex) {
            // 'uncheck' wraps IOExceptions thrown inside the lambda; rethrow the
            // original checked cause so the outer handler logs it uniformly.
            throw ex.getCause();
        }
        if (writeProp && filename == null) {
            String bundleName = outputDir.getFileName().toString();
            // NOTE(review): outputDir.getParent() can be null when outputDir is
            // a single-component path such as "." — confirm callers never hit this.
            Path propsFile = outputDir.getParent().resolve(bundleName + ".json");
            BundleProps.write(propsFile, bundle);
        }
    } catch (IOException ex) {
        L.log(Level.WARNING, "Can't unpack asset bundle " + file, ex);
    }
}
Also used : LogUtils(info.ata4.log.LogUtils) Parameters(com.beust.jcommander.Parameters) IOConsumer.uncheck(info.ata4.util.function.IOConsumer.uncheck) Parameter(com.beust.jcommander.Parameter) Files(java.nio.file.Files) IOException(java.io.IOException) Logger(java.util.logging.Logger) Level(java.util.logging.Level) UncheckedIOException(java.io.UncheckedIOException) FileCommand(info.ata4.disunity.cli.command.FileCommand) PathConverter(info.ata4.disunity.cli.converters.PathConverter) Bundle(info.ata4.junity.bundle.Bundle) Paths(java.nio.file.Paths) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PathUtils(info.ata4.io.util.PathUtils) Optional(java.util.Optional) BundleReader(info.ata4.junity.bundle.BundleReader) Path(java.nio.file.Path) REPLACE_EXISTING(java.nio.file.StandardCopyOption.REPLACE_EXISTING) Path(java.nio.file.Path) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Bundle(info.ata4.junity.bundle.Bundle) UncheckedIOException(java.io.UncheckedIOException) IOException(java.io.IOException) UncheckedIOException(java.io.UncheckedIOException) BundleReader(info.ata4.junity.bundle.BundleReader)

Example 2 with REPLACE_EXISTING

use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project halyard by spinnaker.

The following example is taken from the class Node, method backupLocalFiles.

/**
 * Copies every local file referenced by this node (and, via
 * {@code recursiveConsume}, by all of its children) into {@code outputPath},
 * rewriting each node field to point at the backed-up copy.
 *
 * @param outputPath directory that receives the flattened backup copies
 * @return the paths of all files now referenced by the (rewritten) node tree
 * @throws RuntimeException if a local-file field cannot be read via reflection
 * @throws HalException if copying a file fails
 */
public List<String> backupLocalFiles(String outputPath) {
    List<String> files = new ArrayList<>();
    Consumer<Node> fileFinder = n -> files.addAll(n.localFiles().stream().map(f -> {
        try {
            f.setAccessible(true);
            String fPath = (String) f.get(n);
            if (fPath == null) {
                // Field declared as a local file but not set; nothing to back up.
                return null;
            }
            File fFile = new File(fPath);
            String fName = fFile.getName();
            // Hash the path to uniquely flatten all files into the output directory.
            // Integer.toUnsignedString is used instead of Math.abs because
            // Math.abs(Integer.MIN_VALUE) is still negative, which would leak a
            // '-' sign into the generated file name.
            Path newName = Paths.get(outputPath, Integer.toUnsignedString(fPath.hashCode()) + "-" + fName);
            File parent = newName.toFile().getParentFile();
            if (!parent.exists() && !parent.mkdirs()) {
                throw new HalException(FATAL, "Failed to create backup directory " + parent);
            } else if (fFile.getParent() != null && fFile.getParent().equals(parent.toString())) {
                // Don't move paths that are already in the right folder
                return fPath;
            }
            Files.copy(Paths.get(fPath), newName, REPLACE_EXISTING);
            // Point the node at the backup copy so serialized config stays valid.
            f.set(n, newName.toString());
            return newName.toString();
        } catch (IllegalAccessException e) {
            throw new RuntimeException("Failed to get local files for node " + n.getNodeName(), e);
        } catch (IOException e) {
            throw new HalException(FATAL, "Failed to backup user file: " + e.getMessage(), e);
        } finally {
            // Always restore the field's accessibility flag.
            f.setAccessible(false);
        }
    }).filter(Objects::nonNull).collect(Collectors.toList()));
    recursiveConsume(fileFinder);
    return files;
}
Also used : Arrays(java.util.Arrays) Getter(lombok.Getter) HalException(com.netflix.spinnaker.halyard.core.error.v1.HalException) HashMap(java.util.HashMap) ConfigProblemSetBuilder(com.netflix.spinnaker.halyard.config.problem.v1.ConfigProblemSetBuilder) ArrayList(java.util.ArrayList) REMOVED(com.netflix.spinnaker.halyard.config.model.v1.node.NodeDiff.ChangeType.REMOVED) Map(java.util.Map) JsonIgnore(com.fasterxml.jackson.annotation.JsonIgnore) REPLACE_EXISTING(java.nio.file.StandardCopyOption.REPLACE_EXISTING) Method(java.lang.reflect.Method) Path(java.nio.file.Path) GlobalApplicationOptions(com.netflix.spinnaker.halyard.core.GlobalApplicationOptions) Files(java.nio.file.Files) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) FileUtils(org.apache.commons.io.FileUtils) IOException(java.io.IOException) Field(java.lang.reflect.Field) Collectors(java.util.stream.Collectors) EDITED(com.netflix.spinnaker.halyard.config.model.v1.node.NodeDiff.ChangeType.EDITED) File(java.io.File) InvocationTargetException(java.lang.reflect.InvocationTargetException) Objects(java.util.Objects) Consumer(java.util.function.Consumer) ADDED(com.netflix.spinnaker.halyard.config.model.v1.node.NodeDiff.ChangeType.ADDED) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) FATAL(com.netflix.spinnaker.halyard.core.problem.v1.Problem.Severity.FATAL) Paths(java.nio.file.Paths) CRC32(java.util.zip.CRC32) Path(java.nio.file.Path) HalException(com.netflix.spinnaker.halyard.core.error.v1.HalException) ArrayList(java.util.ArrayList) IOException(java.io.IOException) Objects(java.util.Objects) File(java.io.File)

Example 3 with REPLACE_EXISTING

use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project presto by prestodb.

The following example is taken from the class TestHivePushdownFilterQueries, method assertFileFormat.

/**
 * Verifies that the Hive connector correctly reads a table declared as ORC
 * whose partition data is actually stored in {@code storageFormat}: it creates
 * an ORC-declared table, swaps its partition directory for one written in the
 * other format, then runs a battery of filter/pushdown queries against it.
 *
 * @param storageFormat the real on-disk format substituted under the ORC table
 * @throws IOException if the partition directory swap fails
 */
private void assertFileFormat(HiveStorageFormat storageFormat) throws IOException {
    // Make an ORC table backed by file of some other format
    QueryRunner queryRunner = getQueryRunner();
    queryRunner.execute("CREATE TABLE test_file_format_orc WITH (format='ORC', partitioned_by=ARRAY['ds']) AS " + "SELECT * FROM lineitem_ex_partitioned LIMIT 1");
    try {
        queryRunner.execute(format("CREATE TABLE test_file_format WITH (format='%s', partitioned_by=ARRAY['ds']) AS " + "SELECT * FROM lineitem_ex_partitioned", storageFormat));
        // Replace the ORC table's partition data with the other-format files on disk,
        // bypassing the metastore so the table still claims to be ORC.
        Path orcDirectory = getPartitionDirectory("test_file_format_orc", "ds='2019-11-01'");
        deleteRecursively(orcDirectory, ALLOW_INSECURE);
        Path otherDirectory = getPartitionDirectory("test_file_format", "ds='2019-11-01'");
        Files.move(otherDirectory, orcDirectory, REPLACE_EXISTING);
        // H2 common-table-expression mirroring the test table, used as the expected-result oracle.
        String cte = WITH_LINEITEM_EX + ", test_file_format_orc AS " + "(SELECT\n" + "    linenumber,\n" + "    orderkey,\n" + "    partkey,\n" + "    suppkey,\n" + "    quantity,\n" + "    extendedprice,\n" + "    tax,\n" + "    shipinstruct,\n" + "    shipmode,\n" + "    ship_by_air,\n" + "    is_returned,\n" + "    ship_day,\n" + "    ship_month,\n" + "    ship_timestamp,\n" + "    commit_timestamp,\n" + "    discount_real,\n" + "    discount,\n" + "    tax_real,\n" + "    ship_day_month,\n" + "    discount_long_decimal,\n" + "    tax_short_decimal,\n" + "    long_decimals,\n" + "    keys,\n" + "    doubles,\n" + "    nested_keys,\n" + "    flags,\n" + "    reals,\n" + "    info,\n" + "    dates,\n" + "    timestamps,\n" + "    comment,\n" + "    uppercase_comment,\n" + "    fixed_comment,\n" + "    char_array,\n" + "    varchar_array,\n" + "    '2019-11-01' AS ds\n" + "FROM lineitem_ex)";
        // no filter
        assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc", cte);
        assertQueryUsingH2Cte("SELECT comment FROM test_file_format_orc", cte);
        // Partial aggregation pushdown is only valid for ORC/Parquet; expect a failure
        // message, then re-run with the session property disabled.
        assertQueryFails("SELECT COUNT(*) FROM test_file_format_orc", "Partial aggregation pushdown only supported for ORC/Parquet files. Table tpch.test_file_format_orc has file ((.*?)) of format (.*?). Set session property hive.pushdown_partial_aggregations_into_scan=false and execute query again");
        assertQueryUsingH2Cte(noPartialAggregationPushdown(queryRunner.getDefaultSession()), "SELECT COUNT(*) FROM test_file_format_orc", cte, Function.identity());
        // filter on partition column
        assertQueryUsingH2Cte("SELECT comment from test_file_format_orc WHERE ds='2019-11-01'", cte);
        assertQueryReturnsEmptyResult("SELECT comment FROM test_file_format_orc WHERE ds='2019-11-02'");
        // range filters and filter functions
        assertQueryUsingH2Cte("SELECT orderkey from test_file_format_orc WHERE orderkey < 1000", cte);
        assertQueryUsingH2Cte("SELECT orderkey, comment from test_file_format_orc WHERE orderkey < 1000 AND comment LIKE '%final%'", cte);
        assertQueryUsingH2Cte("SELECT COUNT(*) from test_file_format_orc WHERE orderkey < 1000", cte);
        assertQueryUsingH2Cte("SELECT COUNT(*) FROM test_file_format_orc WHERE concat(ds,'*') = '2019-11-01*'", cte);
        assertQueryUsingH2Cte("SELECT orderkey FROM test_file_format_orc WHERE comment LIKE '%final%'", cte);
        assertQueryUsingH2Cte("SELECT discount FROM test_file_format_orc WHERE discount > 0.01", cte);
        assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE discount > 0.01 and discount + tax > 0.03", cte);
        assertQueryUsingH2Cte("SELECT COUNT(*) FROM test_file_format_orc WHERE discount = 0.0", cte);
        assertQueryUsingH2Cte("SELECT COUNT(*) FROM test_file_format_orc WHERE discount_real > 0.01", cte);
        assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE tax_real > 0.01 and discount_real > 0.01", cte);
        assertQueryUsingH2Cte("SELECT keys FROM test_file_format_orc WHERE keys IS NOT NULL", cte);
        assertQueryUsingH2Cte("SELECT keys FROM test_file_format_orc WHERE keys IS NULL", cte);
        assertQueryUsingH2Cte("SELECT linenumber FROM test_file_format_orc WHERE keys[1] % 5 = 0 AND keys[2] > 100", cte);
        assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE is_returned=false", cte);
        assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE is_returned is NULL", cte);
        assertQueryUsingH2Cte("SELECT ship_day FROM test_file_format_orc WHERE ship_day > 2", cte);
        assertQueryUsingH2Cte("SELECT discount_long_decimal FROM test_file_format_orc WHERE discount_long_decimal > 0.05", cte);
        assertQueryUsingH2Cte("SELECT tax_short_decimal FROM test_file_format_orc WHERE tax_short_decimal < 0.03", cte);
        assertQueryUsingH2Cte("SELECT discount_long_decimal FROM test_file_format_orc WHERE discount_long_decimal > 0.01 AND tax_short_decimal > 0.01 AND (discount_long_decimal + tax_short_decimal) < 0.03", cte);
        // H2 has no ROW/struct syntax, so rewrite struct accesses into array indexing
        // before handing the query to the oracle.
        Function<String, String> rewriter = query -> query.replaceAll("info.orderkey", "info[1]").replaceAll("dates\\[1\\].day", "dates[1][1]");
        assertQueryUsingH2Cte("SELECT dates FROM test_file_format_orc WHERE dates[1].day % 2 = 0", cte, rewriter);
        assertQueryUsingH2Cte("SELECT info.orderkey, dates FROM test_file_format_orc WHERE info IS NOT NULL AND dates IS NOT NULL AND info.orderkey % 7 = 0", cte, rewriter);
        // empty result
        assertQueryReturnsEmptyResult("SELECT comment FROM test_file_format_orc WHERE orderkey < 0");
        assertQueryReturnsEmptyResult("SELECT comment FROM test_file_format_orc WHERE comment LIKE '???'");
    } finally {
        // test_file_format may not exist if CTAS failed, hence IF EXISTS here only.
        assertUpdate("DROP TABLE IF EXISTS test_file_format");
        assertUpdate("DROP TABLE test_file_format_orc");
    }
}
Also used : Path(java.nio.file.Path) MoreFiles.deleteRecursively(com.google.common.io.MoreFiles.deleteRecursively) QueryRunner(com.facebook.presto.testing.QueryRunner) Test(org.testng.annotations.Test) TINYINT(com.facebook.presto.common.type.StandardTypes.TINYINT) Function(java.util.function.Function) DistributedQueryRunner(com.facebook.presto.tests.DistributedQueryRunner) Matcher(java.util.regex.Matcher) ALLOW_INSECURE(com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE) ImmutableList(com.google.common.collect.ImmutableList) RCTEXT(com.facebook.presto.hive.HiveStorageFormat.RCTEXT) Splitter(com.google.common.base.Splitter) Path(java.nio.file.Path) BIGINT(com.facebook.presto.common.type.StandardTypes.BIGINT) TEXTFILE(com.facebook.presto.hive.HiveStorageFormat.TEXTFILE) REPLACE_EXISTING(java.nio.file.StandardCopyOption.REPLACE_EXISTING) BOOLEAN(com.facebook.presto.common.type.StandardTypes.BOOLEAN) AbstractTestQueryFramework(com.facebook.presto.tests.AbstractTestQueryFramework) ATOMIC_MOVE(java.nio.file.StandardCopyOption.ATOMIC_MOVE) HIVE_CATALOG(com.facebook.presto.hive.HiveQueryRunner.HIVE_CATALOG) ImmutableMap(com.google.common.collect.ImmutableMap) Files(java.nio.file.Files) Session(com.facebook.presto.Session) DATE(com.facebook.presto.common.type.StandardTypes.DATE) IOException(java.io.IOException) INTEGER(com.facebook.presto.common.type.StandardTypes.INTEGER) String.format(java.lang.String.format) Collectors.joining(java.util.stream.Collectors.joining) DOUBLE(com.facebook.presto.common.type.StandardTypes.DOUBLE) VARCHAR(com.facebook.presto.common.type.StandardTypes.VARCHAR) TpchTable.getTables(io.airlift.tpch.TpchTable.getTables) RCBINARY(com.facebook.presto.hive.HiveStorageFormat.RCBINARY) List(java.util.List) Paths(java.nio.file.Paths) PUSHDOWN_FILTER_ENABLED(com.facebook.presto.hive.HiveSessionProperties.PUSHDOWN_FILTER_ENABLED) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) 
SMALLINT(com.facebook.presto.common.type.StandardTypes.SMALLINT) REAL(com.facebook.presto.common.type.StandardTypes.REAL) PARTIAL_AGGREGATION_PUSHDOWN_ENABLED(com.facebook.presto.hive.HiveSessionProperties.PARTIAL_AGGREGATION_PUSHDOWN_ENABLED) QueryRunner(com.facebook.presto.testing.QueryRunner) DistributedQueryRunner(com.facebook.presto.tests.DistributedQueryRunner)

Example 4 with REPLACE_EXISTING

use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project ignite by apache.

The following example is taken from the class FileWriteAheadLogManager, method formatWorkSegments.

/**
 * Formatting working segments to {@link DataStorageConfiguration#getWalSegmentSize()} for work in a mmap or fsync case.
 * <p>
 * Undersized segments are copied to a temporary file, zero-padded with
 * {@code FILL_BUF} up to the configured segment size, fsync'd, and then
 * atomically moved back over the original segment.
 *
 * @throws StorageException If an error occurs when formatting.
 */
private void formatWorkSegments() throws StorageException {
    assert isArchiverEnabled();
    // Only mmap and FSYNC modes require fixed-size segments.
    if (mode == WALMode.FSYNC || mmap) {
        // Collect every work-dir segment shorter than the configured size.
        List<FileDescriptor> toFormat = Arrays.stream(scan(walWorkDir.listFiles(WAL_SEGMENT_FILE_FILTER))).filter(fd -> fd.file().length() < dsCfg.getWalSegmentSize()).collect(toList());
        if (!toFormat.isEmpty()) {
            if (log.isInfoEnabled()) {
                log.info("WAL segments in working directory should have the same size: '" + U.humanReadableByteCount(dsCfg.getWalSegmentSize()) + "'. Segments that need reformat " + "found: " + F.viewReadOnly(toFormat, fd -> fd.file().getName()) + '.');
            }
            // j marks the start of the current batch for the periodic info log below.
            for (int i = 0, j = 0; i < toFormat.size(); i++) {
                FileDescriptor fd = toFormat.get(i);
                // NOTE(review): built from the segment's file *name* only, so the
                // temp file resolves against the process working directory, not
                // walWorkDir — confirm this is intended (ATOMIC_MOVE below may
                // fail if that is a different filesystem).
                File tmpDst = new File(fd.file().getName() + TMP_SUFFIX);
                try {
                    Files.copy(fd.file().toPath(), tmpDst.toPath());
                    if (log.isDebugEnabled()) {
                        log.debug("Start formatting WAL segment [filePath=" + tmpDst.getAbsolutePath() + ", fileSize=" + U.humanReadableByteCount(tmpDst.length()) + ", toSize=" + U.humanReadableByteCount(dsCfg.getWalSegmentSize()) + ']');
                    }
                    // Pad the copy with FILL_BUF until it reaches the target size, then fsync.
                    try (FileIO fileIO = ioFactory.create(tmpDst, CREATE, READ, WRITE)) {
                        int left = (int) (dsCfg.getWalSegmentSize() - tmpDst.length());
                        fileIO.position(tmpDst.length());
                        while (left > 0) left -= fileIO.writeFully(FILL_BUF, 0, Math.min(FILL_BUF.length, left));
                        fileIO.force();
                    }
                    // Atomic replace so a crash never leaves a half-formatted segment in place.
                    Files.move(tmpDst.toPath(), fd.file().toPath(), REPLACE_EXISTING, ATOMIC_MOVE);
                    if (log.isDebugEnabled())
                        log.debug("WAL segment formatted: " + fd.file().getAbsolutePath());
                    // Batch output.
                    if (log.isInfoEnabled() && (i == toFormat.size() - 1 || (i != 0 && i % 9 == 0))) {
                        log.info("WAL segments formatted: " + toFormat.get(j).file().getName() + (i == j ? "" : " - " + fileName(i)));
                        j = i + 1;
                    }
                } catch (IOException e) {
                    throw new StorageException("Failed to format WAL segment: " + fd.file().getAbsolutePath(), e);
                }
            }
        }
    }
}
Also used : Arrays(java.util.Arrays) BufferedInputStream(java.io.BufferedInputStream) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) GridFinishedFuture(org.apache.ignite.internal.util.future.GridFinishedFuture) DataStorageMetricsImpl(org.apache.ignite.internal.processors.cache.persistence.DataStorageMetricsImpl) FileIO(org.apache.ignite.internal.processors.cache.persistence.file.FileIO) Map(java.util.Map) IGNITE_WAL_SERIALIZER_VERSION(org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_SERIALIZER_VERSION) WALMode(org.apache.ignite.configuration.WALMode) SegmentIO(org.apache.ignite.internal.processors.cache.persistence.wal.io.SegmentIO) CIX1(org.apache.ignite.internal.util.typedef.CIX1) RecordV1Serializer.readPosition(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readPosition) HALF_MAX_WAL_ARCHIVE_SIZE(org.apache.ignite.configuration.DataStorageConfiguration.HALF_MAX_WAL_ARCHIVE_SIZE) RandomAccessFileIOFactory(org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory) RecordSerializer(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer) IgniteInClosure(org.apache.ignite.lang.IgniteInClosure) IgniteDataIntegrityViolationException(org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException) IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT(org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) Set(java.util.Set) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) ByteOrder(java.nio.ByteOrder) IgniteConfiguration(org.apache.ignite.configuration.IgniteConfiguration) RecordSerializerFactoryImpl(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactoryImpl) EVT_WAL_SEGMENT_COMPACTED(org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_COMPACTED) 
SYSTEM_WORKER_TERMINATION(org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION) CompressionProcessor.getDefaultCompressionLevel(org.apache.ignite.internal.processors.compress.CompressionProcessor.getDefaultCompressionLevel) MarshalledRecord(org.apache.ignite.internal.pagemem.wal.record.MarshalledRecord) IgniteWriteAheadLogManager(org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager) PageSnapshot(org.apache.ignite.internal.pagemem.wal.record.PageSnapshot) ZipOutputStream(java.util.zip.ZipOutputStream) WALIterator(org.apache.ignite.internal.pagemem.wal.WALIterator) IgniteBiPredicate(org.apache.ignite.lang.IgniteBiPredicate) IgniteThread(org.apache.ignite.thread.IgniteThread) ZIP_SUFFIX(org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.ZIP_SUFFIX) U(org.apache.ignite.internal.util.typedef.internal.U) EVT_WAL_SEGMENT_ARCHIVED(org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED) IgniteLogger(org.apache.ignite.IgniteLogger) LATEST_SERIALIZER_VERSION(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactory.LATEST_SERIALIZER_VERSION) BufferedOutputStream(java.io.BufferedOutputStream) ArrayList(java.util.ArrayList) GridKernalContext(org.apache.ignite.internal.GridKernalContext) READ(java.nio.file.StandardOpenOption.READ) DiskPageCompression(org.apache.ignite.configuration.DiskPageCompression) GridCacheDatabaseSharedManager(org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager) FileHandleManager(org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.FileHandleManager) CI1(org.apache.ignite.internal.util.typedef.CI1) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) CompressionProcessor.checkCompressionLevelBounds(org.apache.ignite.internal.processors.compress.CompressionProcessor.checkCompressionLevelBounds) 
SwitchSegmentRecord(org.apache.ignite.internal.pagemem.wal.record.SwitchSegmentRecord) REPLACE_EXISTING(java.nio.file.StandardCopyOption.REPLACE_EXISTING) IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT(org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT) FileDescriptor.fileName(org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor.fileName) ATOMIC_MOVE(java.nio.file.StandardCopyOption.ATOMIC_MOVE) Files(java.nio.file.Files) FileOutputStream(java.io.FileOutputStream) GridUnsafe(org.apache.ignite.internal.util.GridUnsafe) IOException(java.io.IOException) FileInput(org.apache.ignite.internal.processors.cache.persistence.wal.io.FileInput) UNLIMITED_WAL_ARCHIVE(org.apache.ignite.configuration.DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE) GridFileUtils(org.apache.ignite.internal.util.io.GridFileUtils) File(java.io.File) AtomicLong(java.util.concurrent.atomic.AtomicLong) FileFilter(java.io.FileFilter) GridCacheSharedContext(org.apache.ignite.internal.processors.cache.GridCacheSharedContext) TreeMap(java.util.TreeMap) FileHandleManagerFactory(org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.FileHandleManagerFactory) CREATE(java.nio.file.StandardOpenOption.CREATE) LockedSegmentFileInputFactory(org.apache.ignite.internal.processors.cache.persistence.wal.io.LockedSegmentFileInputFactory) WalSegmentArchivedEvent(org.apache.ignite.events.WalSegmentArchivedEvent) SegmentFileInputFactory(org.apache.ignite.internal.processors.cache.persistence.wal.io.SegmentFileInputFactory) CO(org.apache.ignite.internal.util.typedef.CO) IgniteUuid(org.apache.ignite.lang.IgniteUuid) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) StorageException(org.apache.ignite.internal.processors.cache.persistence.StorageException) CRITICAL_ERROR(org.apache.ignite.failure.FailureType.CRITICAL_ERROR) Time(java.sql.Time) 
IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE(org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE) FileWriteHandle(org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.FileWriteHandle) DATA_RECORD_V2(org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_RECORD_V2) GridWorker(org.apache.ignite.internal.util.worker.GridWorker) ByteBuffer(java.nio.ByteBuffer) PriorityBlockingQueue(java.util.concurrent.PriorityBlockingQueue) IgniteSystemProperties(org.apache.ignite.IgniteSystemProperties) IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE(org.apache.ignite.IgniteSystemProperties.IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE) X(org.apache.ignite.internal.util.typedef.X) FailureProcessor(org.apache.ignite.internal.processors.failure.FailureProcessor) ZipEntry(java.util.zip.ZipEntry) WRITE(java.nio.file.StandardOpenOption.WRITE) RecordV1Serializer(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer) FailureType(org.apache.ignite.failure.FailureType) AtomicReferenceFieldUpdater(java.util.concurrent.atomic.AtomicReferenceFieldUpdater) Collection(java.util.Collection) WALRecord(org.apache.ignite.internal.pagemem.wal.record.WALRecord) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) AbstractFileHandle(org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.AbstractFileHandle) DataRecord(org.apache.ignite.internal.pagemem.wal.record.DataRecord) EOFException(java.io.EOFException) FileNotFoundException(java.io.FileNotFoundException) GridCacheSharedManagerAdapter(org.apache.ignite.internal.processors.cache.GridCacheSharedManagerAdapter) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) Objects(java.util.Objects) PageDeltaRecord(org.apache.ignite.internal.pagemem.wal.record.delta.PageDeltaRecord) Nullable(org.jetbrains.annotations.Nullable) List(java.util.List) FastCrc(org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc) 
WalSegmentCompactedEvent(org.apache.ignite.events.WalSegmentCompactedEvent) IGNITE_WAL_MMAP(org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_MMAP) DataInput(java.io.DataInput) HeaderRecord(org.apache.ignite.internal.processors.cache.persistence.wal.record.HeaderRecord) Pattern(java.util.regex.Pattern) RecordSerializerFactory(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactory) AtomicLongArray(java.util.concurrent.atomic.AtomicLongArray) ZipInputStream(java.util.zip.ZipInputStream) WALDisableContext(org.apache.ignite.internal.processors.cache.WalStateManager.WALDisableContext) PdsFolderSettings(org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings) HashMap(java.util.HashMap) GridTimeoutProcessor(org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor) IgniteSystemProperties.getDouble(org.apache.ignite.IgniteSystemProperties.getDouble) HEADER_RECORD_SIZE(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.HEADER_RECORD_SIZE) HashSet(java.util.HashSet) FailureContext(org.apache.ignite.failure.FailureContext) SegmentAware(org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware) IgnitePredicate(org.apache.ignite.lang.IgnitePredicate) TMP_SUFFIX(org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.TMP_SUFFIX) FileIOFactory(org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory) DataStorageConfiguration(org.apache.ignite.configuration.DataStorageConfiguration) SimpleSegmentFileInputFactory(org.apache.ignite.internal.processors.cache.persistence.wal.io.SimpleSegmentFileInputFactory) MemoryRecoveryRecord(org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord) RolloverType(org.apache.ignite.internal.pagemem.wal.record.RolloverType) F(org.apache.ignite.internal.util.typedef.F) 
RecordV1Serializer.readSegmentHeader(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readSegmentHeader) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) FileInputStream(java.io.FileInputStream) FileAlreadyExistsException(java.nio.file.FileAlreadyExistsException) Collectors.toList(java.util.stream.Collectors.toList) GridEventStorageManager(org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager) Collections(java.util.Collections) IOException(java.io.IOException) File(java.io.File) StorageException(org.apache.ignite.internal.processors.cache.persistence.StorageException) FileIO(org.apache.ignite.internal.processors.cache.persistence.file.FileIO)

Aggregations

IOException (java.io.IOException)4 Files (java.nio.file.Files)4 REPLACE_EXISTING (java.nio.file.StandardCopyOption.REPLACE_EXISTING)4 Path (java.nio.file.Path)3 Paths (java.nio.file.Paths)3 List (java.util.List)3 File (java.io.File)2 ATOMIC_MOVE (java.nio.file.StandardCopyOption.ATOMIC_MOVE)2 ArrayList (java.util.ArrayList)2 Arrays (java.util.Arrays)2 HashMap (java.util.HashMap)2 Map (java.util.Map)2 Objects (java.util.Objects)2 Optional (java.util.Optional)2 Pattern (java.util.regex.Pattern)2 Parameter (com.beust.jcommander.Parameter)1 Parameters (com.beust.jcommander.Parameters)1 Session (com.facebook.presto.Session)1 BIGINT (com.facebook.presto.common.type.StandardTypes.BIGINT)1 BOOLEAN (com.facebook.presto.common.type.StandardTypes.BOOLEAN)1