Use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project disunity by ata4:
the class BundleUnpack, method runFile.
/**
 * Unpacks all entries of the given asset bundle file (or only the entry
 * selected via the {@code filename} option) into {@code outputDir},
 * creating intermediate directories and overwriting existing files.
 * I/O failures are logged as warnings rather than propagated.
 *
 * @param file path to the asset bundle to unpack
 */
@Override
protected void runFile(Path file) {
    try (BundleReader bundleReader = new BundleReader(file)) {
        Bundle bundle = bundleReader.read();

        // Progress accounting: "done" counts extracted entries against the
        // total number of entries in the bundle.
        AtomicInteger done = new AtomicInteger();
        long total = bundle.entryInfos().size();

        // Derive the output directory if the user didn't define one:
        // a single-entry bundle unpacks next to the bundle file itself,
        // a multi-entry bundle unpacks into a sibling directory named
        // after the bundle's base name.
        if (outputDir == null) {
            // with sub-directories
            if (bundle.entryInfos().size() == 1) {
                outputDir = file.getParent();
                if (outputDir == null) {
                    // Passed a filename only. Use the current directory.
                    outputDir = Paths.get(".");
                }
            } else {
                String fileName = PathUtils.getBaseName(file);
                outputDir = file.resolveSibling(fileName);
            }
        }

        try {
            // Checked IOExceptions thrown inside the lambda are tunneled out
            // as UncheckedIOException by uncheck() and unwrapped below.
            bundle.entries().stream()
                    .filter(entry -> filename == null || entry.name().equals(filename))
                    .forEach(uncheck(entry -> {
                        progress.update(Optional.of(entry.name()), done.getAndIncrement() / (double) total);

                        Path entryFile = outputDir.resolve(entry.name());
                        Files.createDirectories(entryFile.getParent());
                        Files.copy(entry.inputStream(), entryFile, REPLACE_EXISTING);

                        // NOTE(review): when "filename" filters the stream,
                        // "done" can never reach "total", so this terminal
                        // progress update is skipped in that mode.
                        if (done.get() == total) {
                            progress.update(Optional.empty(), 1);
                        }
                    }));
        } catch (UncheckedIOException ex) {
            // Unwrap the tunneled IOException so the outer handler logs it.
            throw ex.getCause();
        }

        // Optionally write the bundle properties as "<dir name>.json" next
        // to the output directory.
        if (writeProp && filename == null) {
            String bundleName = outputDir.getFileName().toString();
            Path outputParent = outputDir.getParent();
            if (outputParent == null) {
                // Fix: outputDir may have no parent (e.g. "." when the bundle
                // was given as a bare file name); write the props file into
                // outputDir itself instead of raising a NullPointerException.
                outputParent = outputDir;
            }
            Path propsFile = outputParent.resolve(bundleName + ".json");
            BundleProps.write(propsFile, bundle);
        }
    } catch (IOException ex) {
        L.log(Level.WARNING, "Can't unpack asset bundle " + file, ex);
    }
}
Use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project halyard by spinnaker:
the class Node, method backupLocalFiles.
/**
 * Copies every local file referenced by this node (and, via
 * {@link #recursiveConsume}, its children) into {@code outputPath},
 * flattening them under hash-prefixed names, and rewrites each node
 * field to point at the backed-up copy.
 *
 * @param outputPath directory that receives the flattened backup copies
 * @return the paths of all files now backing the nodes (backed-up copies,
 *         or the originals that were already in place)
 * @throws HalException if copying a user file fails
 */
public List<String> backupLocalFiles(String outputPath) {
    List<String> files = new ArrayList<>();
    Consumer<Node> fileFinder = n -> files.addAll(n.localFiles().stream().map(f -> {
        try {
            f.setAccessible(true);
            String fPath = (String) f.get(n);
            if (fPath == null) {
                // Field holds no file reference; drop it via the nonNull filter.
                return null;
            }

            File fFile = new File(fPath);
            String fName = fFile.getName();

            // Hash the path to uniquely flatten all files into the output
            // directory. NOTE(review): Math.abs(Integer.MIN_VALUE) is still
            // negative, so a pathological hash code yields a "-" prefix; the
            // name stays unique, just oddly formatted.
            Path newName = Paths.get(outputPath, Math.abs(fPath.hashCode()) + "-" + fName);
            File parent = newName.toFile().getParentFile();
            if (!parent.exists()) {
                parent.mkdirs();
            } else if (parent.toString().equals(fFile.getParent())) {
                // Don't move paths that are already in the right folder.
                // Fix: keep the possibly-null source parent on the argument
                // side of equals() so a bare file name (which has no parent)
                // no longer triggers a NullPointerException.
                return fPath;
            }

            Files.copy(Paths.get(fPath), newName, REPLACE_EXISTING);
            f.set(n, newName.toString());
            return newName.toString();
        } catch (IllegalAccessException e) {
            throw new RuntimeException("Failed to get local files for node " + n.getNodeName(), e);
        } catch (IOException e) {
            throw new HalException(FATAL, "Failed to backup user file: " + e.getMessage(), e);
        } finally {
            // Restore the field's accessibility flag regardless of outcome.
            f.setAccessible(false);
        }
    }).filter(Objects::nonNull).collect(Collectors.toList()));

    recursiveConsume(fileFinder);

    return files;
}
Use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project presto by prestodb:
the class TestHivePushdownFilterQueries, method assertFileFormat.
/**
 * Verifies that queries behave correctly when an ORC-declared partition is
 * actually backed by files of {@code storageFormat}: creates an ORC table,
 * swaps its partition directory for one written in the other format, then
 * runs a battery of filter/pushdown queries against it.
 *
 * @param storageFormat the real on-disk format substituted under the ORC table
 * @throws IOException if the partition directory swap fails
 */
private void assertFileFormat(HiveStorageFormat storageFormat) throws IOException {
// Make an ORC table backed by file of some other format
QueryRunner queryRunner = getQueryRunner();
queryRunner.execute("CREATE TABLE test_file_format_orc WITH (format='ORC', partitioned_by=ARRAY['ds']) AS " + "SELECT * FROM lineitem_ex_partitioned LIMIT 1");
try {
queryRunner.execute(format("CREATE TABLE test_file_format WITH (format='%s', partitioned_by=ARRAY['ds']) AS " + "SELECT * FROM lineitem_ex_partitioned", storageFormat));
// Replace the ORC partition directory with the other-format one so the
// metastore still reports ORC while the data files are storageFormat.
Path orcDirectory = getPartitionDirectory("test_file_format_orc", "ds='2019-11-01'");
deleteRecursively(orcDirectory, ALLOW_INSECURE);
Path otherDirectory = getPartitionDirectory("test_file_format", "ds='2019-11-01'");
Files.move(otherDirectory, orcDirectory, REPLACE_EXISTING);
// H2 comparison CTE: mirrors the table's columns, with ds pinned to the
// single partition value.
String cte = WITH_LINEITEM_EX + ", test_file_format_orc AS " + "(SELECT\n" + " linenumber,\n" + " orderkey,\n" + " partkey,\n" + " suppkey,\n" + " quantity,\n" + " extendedprice,\n" + " tax,\n" + " shipinstruct,\n" + " shipmode,\n" + " ship_by_air,\n" + " is_returned,\n" + " ship_day,\n" + " ship_month,\n" + " ship_timestamp,\n" + " commit_timestamp,\n" + " discount_real,\n" + " discount,\n" + " tax_real,\n" + " ship_day_month,\n" + " discount_long_decimal,\n" + " tax_short_decimal,\n" + " long_decimals,\n" + " keys,\n" + " doubles,\n" + " nested_keys,\n" + " flags,\n" + " reals,\n" + " info,\n" + " dates,\n" + " timestamps,\n" + " comment,\n" + " uppercase_comment,\n" + " fixed_comment,\n" + " char_array,\n" + " varchar_array,\n" + " '2019-11-01' AS ds\n" + "FROM lineitem_ex)";
// no filter
assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc", cte);
assertQueryUsingH2Cte("SELECT comment FROM test_file_format_orc", cte);
// Partial aggregation pushdown must reject the non-ORC files...
assertQueryFails("SELECT COUNT(*) FROM test_file_format_orc", "Partial aggregation pushdown only supported for ORC/Parquet files. Table tpch.test_file_format_orc has file ((.*?)) of format (.*?). Set session property hive.pushdown_partial_aggregations_into_scan=false and execute query again");
// ...and the query succeeds once pushdown is disabled.
assertQueryUsingH2Cte(noPartialAggregationPushdown(queryRunner.getDefaultSession()), "SELECT COUNT(*) FROM test_file_format_orc", cte, Function.identity());
// filter on partition column
assertQueryUsingH2Cte("SELECT comment from test_file_format_orc WHERE ds='2019-11-01'", cte);
assertQueryReturnsEmptyResult("SELECT comment FROM test_file_format_orc WHERE ds='2019-11-02'");
// range filters and filter functions
assertQueryUsingH2Cte("SELECT orderkey from test_file_format_orc WHERE orderkey < 1000", cte);
assertQueryUsingH2Cte("SELECT orderkey, comment from test_file_format_orc WHERE orderkey < 1000 AND comment LIKE '%final%'", cte);
assertQueryUsingH2Cte("SELECT COUNT(*) from test_file_format_orc WHERE orderkey < 1000", cte);
assertQueryUsingH2Cte("SELECT COUNT(*) FROM test_file_format_orc WHERE concat(ds,'*') = '2019-11-01*'", cte);
assertQueryUsingH2Cte("SELECT orderkey FROM test_file_format_orc WHERE comment LIKE '%final%'", cte);
assertQueryUsingH2Cte("SELECT discount FROM test_file_format_orc WHERE discount > 0.01", cte);
assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE discount > 0.01 and discount + tax > 0.03", cte);
assertQueryUsingH2Cte("SELECT COUNT(*) FROM test_file_format_orc WHERE discount = 0.0", cte);
assertQueryUsingH2Cte("SELECT COUNT(*) FROM test_file_format_orc WHERE discount_real > 0.01", cte);
assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE tax_real > 0.01 and discount_real > 0.01", cte);
// array / null handling
assertQueryUsingH2Cte("SELECT keys FROM test_file_format_orc WHERE keys IS NOT NULL", cte);
assertQueryUsingH2Cte("SELECT keys FROM test_file_format_orc WHERE keys IS NULL", cte);
assertQueryUsingH2Cte("SELECT linenumber FROM test_file_format_orc WHERE keys[1] % 5 = 0 AND keys[2] > 100", cte);
assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE is_returned=false", cte);
assertQueryUsingH2Cte("SELECT * FROM test_file_format_orc WHERE is_returned is NULL", cte);
assertQueryUsingH2Cte("SELECT ship_day FROM test_file_format_orc WHERE ship_day > 2", cte);
// decimal filters
assertQueryUsingH2Cte("SELECT discount_long_decimal FROM test_file_format_orc WHERE discount_long_decimal > 0.05", cte);
assertQueryUsingH2Cte("SELECT tax_short_decimal FROM test_file_format_orc WHERE tax_short_decimal < 0.03", cte);
assertQueryUsingH2Cte("SELECT discount_long_decimal FROM test_file_format_orc WHERE discount_long_decimal > 0.01 AND tax_short_decimal > 0.01 AND (discount_long_decimal + tax_short_decimal) < 0.03", cte);
// H2 has no ROW type: rewrite struct field access to positional indexing.
Function<String, String> rewriter = query -> query.replaceAll("info.orderkey", "info[1]").replaceAll("dates\\[1\\].day", "dates[1][1]");
assertQueryUsingH2Cte("SELECT dates FROM test_file_format_orc WHERE dates[1].day % 2 = 0", cte, rewriter);
assertQueryUsingH2Cte("SELECT info.orderkey, dates FROM test_file_format_orc WHERE info IS NOT NULL AND dates IS NOT NULL AND info.orderkey % 7 = 0", cte, rewriter);
// empty result
assertQueryReturnsEmptyResult("SELECT comment FROM test_file_format_orc WHERE orderkey < 0");
assertQueryReturnsEmptyResult("SELECT comment FROM test_file_format_orc WHERE comment LIKE '???'");
} finally {
// test_file_format may already be gone (its partition dir was moved),
// hence IF EXISTS for it but not for the ORC table.
assertUpdate("DROP TABLE IF EXISTS test_file_format");
assertUpdate("DROP TABLE test_file_format_orc");
}
}
Use of java.nio.file.StandardCopyOption.REPLACE_EXISTING in project ignite by apache:
the class FileWriteAheadLogManager, method formatWorkSegments.
/**
 * Formatting working segments to {@link DataStorageConfiguration#getWalSegmentSize()} for work in a mmap or fsync case.
 *
 * Each undersized segment is copied to a temporary file, padded with
 * {@code FILL_BUF} up to the configured size, fsynced, and then atomically
 * renamed back over the original segment.
 *
 * @throws StorageException If an error occurs when formatting.
 */
private void formatWorkSegments() throws StorageException {
assert isArchiverEnabled();
// Only FSYNC and mmap modes require fixed-size, pre-allocated segments.
if (mode == WALMode.FSYNC || mmap) {
List<FileDescriptor> toFormat = Arrays.stream(scan(walWorkDir.listFiles(WAL_SEGMENT_FILE_FILTER))).filter(fd -> fd.file().length() < dsCfg.getWalSegmentSize()).collect(toList());
if (!toFormat.isEmpty()) {
if (log.isInfoEnabled()) {
log.info("WAL segments in working directory should have the same size: '" + U.humanReadableByteCount(dsCfg.getWalSegmentSize()) + "'. Segments that need reformat " + "found: " + F.viewReadOnly(toFormat, fd -> fd.file().getName()) + '.');
}
// i walks all segments; j marks the start of the current logging batch.
for (int i = 0, j = 0; i < toFormat.size(); i++) {
FileDescriptor fd = toFormat.get(i);
// NOTE(review): built from getName() only, so the tmp file is created
// relative to the current working directory, not walWorkDir — and the
// later ATOMIC_MOVE could then cross file systems. Confirm this is
// intentional before relying on it.
File tmpDst = new File(fd.file().getName() + TMP_SUFFIX);
try {
Files.copy(fd.file().toPath(), tmpDst.toPath());
if (log.isDebugEnabled()) {
log.debug("Start formatting WAL segment [filePath=" + tmpDst.getAbsolutePath() + ", fileSize=" + U.humanReadableByteCount(tmpDst.length()) + ", toSize=" + U.humanReadableByteCount(dsCfg.getWalSegmentSize()) + ']');
}
// Pad the copy with FILL_BUF from its current end to the target size,
// then force the bytes to disk before the swap.
try (FileIO fileIO = ioFactory.create(tmpDst, CREATE, READ, WRITE)) {
int left = (int) (dsCfg.getWalSegmentSize() - tmpDst.length());
fileIO.position(tmpDst.length());
while (left > 0) left -= fileIO.writeFully(FILL_BUF, 0, Math.min(FILL_BUF.length, left));
fileIO.force();
}
// Atomically replace the undersized segment with the padded copy.
Files.move(tmpDst.toPath(), fd.file().toPath(), REPLACE_EXISTING, ATOMIC_MOVE);
if (log.isDebugEnabled())
log.debug("WAL segment formatted: " + fd.file().getAbsolutePath());
// Batch output.
// Log a summary line every 10 segments and at the end of the list.
if (log.isInfoEnabled() && (i == toFormat.size() - 1 || (i != 0 && i % 9 == 0))) {
log.info("WAL segments formatted: " + toFormat.get(j).file().getName() + (i == j ? "" : " - " + fileName(i)));
j = i + 1;
}
} catch (IOException e) {
throw new StorageException("Failed to format WAL segment: " + fd.file().getAbsolutePath(), e);
}
}
}
}
}
Aggregations