Example use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project, from class IgfsAbstractSelfTest, method testAppendConsistencyMultithreaded.
/**
 * Ensure append consistency when multiple threads write to the same file.
 * <p>
 * Spawns {@code threadCnt} writers that repeatedly append one chunk each and
 * close the stream; concurrent appends that lose the race are expected to fail
 * with {@link IgniteException} and are ignored. Any {@link IOException} is
 * treated as a real failure and rethrown. Finally the file content is verified
 * to consist of exactly the number of chunks successfully written.
 *
 * @throws Exception If failed.
 */
public void testAppendConsistencyMultithreaded() throws Exception {
    if (appendSupported()) {
        final AtomicBoolean stop = new AtomicBoolean();

        // How many chunks were written.
        final AtomicInteger chunksCtr = new AtomicInteger();

        // First I/O error observed by any writer thread (null if none).
        final AtomicReference<Exception> err = new AtomicReference<>();

        igfs.create(FILE, false).close();

        int threadCnt = 50;

        IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
            @SuppressWarnings("ThrowFromFinallyBlock")
            @Override public void run() {
                while (!stop.get() && err.get() == null) {
                    IgfsOutputStream os = null;

                    try {
                        os = igfs.append(FILE, false);

                        os.write(chunk);

                        os.close();

                        // Null out the reference so the finally block does not close
                        // the stream a second time (the original code double-closed
                        // every successfully written stream).
                        os = null;

                        chunksCtr.incrementAndGet();
                    }
                    catch (IgniteException ignore) {
                        // No-op: losing the append race to another thread is expected.
                    }
                    catch (IOException e) {
                        // Remember only the first failure; other threads stop via err check.
                        err.compareAndSet(null, e);
                    }
                    finally {
                        // Reached with os != null only if write/close failed above.
                        if (os != null)
                            try {
                                os.close();
                            }
                            catch (IOException ioe) {
                                throw new IgniteException(ioe);
                            }
                    }
                }
            }
        }, threadCnt);

        long startTime = U.currentTimeMillis();

        // Let the writers run until 50 chunks are written, an error occurs,
        // or a 60 second safety timeout elapses.
        while (err.get() == null && chunksCtr.get() < 50 && U.currentTimeMillis() - startTime < 60 * 1000)
            U.sleep(100);

        stop.set(true);

        fut.get();

        awaitFileClose(igfs, FILE);

        if (err.get() != null) {
            X.println("Test failed: rethrowing first error: " + err.get());

            throw err.get();
        }

        // The file must contain exactly one chunk per successful append.
        byte[][] data = new byte[chunksCtr.get()][];

        Arrays.fill(data, chunk);

        checkFileContent(igfs, FILE, data);
    }
}
Example use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project, from class IgfsBackupFailoverSelfTest, method testWriteFailoverWhileStoppingMultipleNodes.
/**
 * Tests that writes survive a failover in which all IGFS nodes except the first
 * are stopped while streams opened before the failure are still in use.
 * <p>
 * Phase 1 creates {@code files} files and writes one chunk to each, keeping the
 * streams open. A background thread then stops every grid but node 0. Phase 2
 * attempts a second write through the original (possibly broken) streams,
 * falling back to re-opening via {@code append} on {@link IOException}.
 * Finally each file is verified to contain both chunks.
 *
 * @throws Exception If failed.
 */
public void testWriteFailoverWhileStoppingMultipleNodes() throws Exception {
    final IgfsImpl igfs0 = nodeDatas[0].igfsImpl;

    clear(igfs0);

    IgfsAbstractSelfTest.create(igfs0, paths(DIR, SUBDIR), null);

    // Streams stay open across the node shutdown so phase 2 can reuse them.
    final IgfsOutputStream[] outStreams = new IgfsOutputStream[files];

    // Create files:
    for (int f = 0; f < files; f++) {
        final byte[] data = createChunk(fileSize, f);

        IgfsOutputStream os = null;

        try {
            os = igfs0.create(filePath(f), 256, true, null, 0, -1, null);

            assert os != null;

            writeFileChunks(os, data);
        }
        finally {
            // Flush (but deliberately do not close) so written data is durable
            // while the stream remains usable for the second write phase.
            if (os != null)
                os.flush();
        }

        outStreams[f] = os;

        X.println("write #1 completed: " + f);
    }

    final AtomicBoolean stop = new AtomicBoolean();

    GridTestUtils.runMultiThreadedAsync(new Callable() {
        @Override
        public Object call() throws Exception {
            // Some delay to ensure read is in progress.
            Thread.sleep(10_000);

            // Now stop all the nodes but the 1st:
            for (int n = 1; n < numIgfsNodes; n++) {
                stopGrid(n);

                X.println("#### grid " + n + " stopped.");
            }

            //Thread.sleep(10_000);
            stop.set(true);

            return null;
        }
    }, 1, "igfs-node-stopper");

    // Write #2: may race with the node shutdown above.
    for (int f0 = 0; f0 < files; f0++) {
        final IgfsOutputStream os = outStreams[f0];

        assert os != null;

        final int f = f0;

        // Single attempt: the fallback to append() inside call() is expected
        // to absorb any stream breakage caused by the stopped nodes.
        int att = doWithRetries(1, new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                IgfsOutputStream ios = os;

                try {
                    writeChunks0(igfs0, ios, f);
                }
                catch (IOException ioe) {
                    log().warning("Attempt to append the data to existing stream failed: ", ioe);

                    // Re-open the file and retry the write through a fresh stream.
                    ios = igfs0.append(filePath(f), false);

                    assert ios != null;

                    writeChunks0(igfs0, ios, f);
                }

                return null;
            }
        });

        assert att == 1;

        X.println("write #2 completed: " + f0 + " in " + att + " attempts.");
    }

    // Wait for the stopper thread to finish shutting nodes down.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            return stop.get();
        }
    }, 25_000);

    // Check files: each must contain both chunks despite the failover.
    for (int f = 0; f < files; f++) {
        IgfsPath path = filePath(f);

        byte[] data = createChunk(fileSize, f);

        // Check through 1st node:
        checkExist(igfs0, path);

        assertEquals("File length mismatch.", data.length * 2, igfs0.size(path));

        checkFileContent(igfs0, path, data, data);

        X.println("Read test completed: " + f);
    }
}
Example use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project, from class IgfsSizeSelfTest, method write.
/**
 * Perform write of the files.
 * <p>
 * Creates {@code FILES_CNT} empty files, picks a random size for each, splits
 * that size into full blocks plus an optional remainder block, and appends one
 * chunk per block. The expected block layout is returned for later validation.
 *
 * @return Collection of written file descriptors.
 * @throws Exception If failed.
 */
private Collection<IgfsFile> write() throws Exception {
    Collection<IgfsFile> res = new HashSet<>(FILES_CNT, 1.0f);

    ThreadLocalRandom8 rand = ThreadLocalRandom8.current();

    for (int i = 0; i < FILES_CNT; i++) {
        // Create empty file locally.
        IgfsPath path = new IgfsPath("/file-" + i);

        igfs(0).create(path, false).close();

        IgfsMetaManager meta = igfs(0).context().meta();

        IgniteUuid fileId = meta.fileId(path);

        // Calculate file blocks.
        int fileSize = rand.nextInt(MAX_FILE_SIZE);

        int fullBlocks = fileSize / BLOCK_SIZE;

        int remainderSize = fileSize % BLOCK_SIZE;

        // Capacity fix: '+' binds tighter than '>', so the original expression
        // 'fullBlocks + remainderSize > 0 ? 1 : 0' evaluated to 0 or 1 instead
        // of the intended total block count.
        Collection<IgfsBlock> blocks = new ArrayList<>(fullBlocks + (remainderSize > 0 ? 1 : 0));

        for (int j = 0; j < fullBlocks; j++)
            blocks.add(new IgfsBlock(new IgfsBlockKey(fileId, null, true, j), BLOCK_SIZE));

        // Trailing partial block, if the size is not an exact multiple.
        if (remainderSize > 0)
            blocks.add(new IgfsBlock(new IgfsBlockKey(fileId, null, true, fullBlocks), remainderSize));

        IgfsFile file = new IgfsFile(path, fileSize, blocks);

        // Actual write: one append per expected block.
        for (IgfsBlock block : blocks) {
            IgfsOutputStream os = igfs(0).append(path, false);

            os.write(chunk(block.length()));

            os.close();
        }

        // Add written file to the result set.
        res.add(file);
    }

    return res;
}
Example use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project, from class IgfsMetricsSelfTest, method testBlockMetrics.
/**
 * Test block metrics.
 * <p>
 * Performs a fixed sequence of create/append/read/delete operations against
 * primary and secondary (DUAL mode) files and after each step verifies the
 * exact block/byte counter deltas relative to the initial metrics snapshot.
 * Statement order and the expected constants are tightly coupled: every
 * {@code checkBlockMetrics} call encodes the cumulative effect of all
 * preceding operations.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings({ "ResultOfMethodCallIgnored", "ConstantConditions" })
public void testBlockMetrics() throws Exception {
    IgfsEx igfs = (IgfsEx) igfsPrimary[0];

    IgfsPath fileRemote = new IgfsPath("/fileRemote");
    IgfsPath file1 = new IgfsPath("/primary/file1");
    IgfsPath file2 = new IgfsPath("/primary/file2");

    // Create remote file and write some data to it.
    IgfsOutputStream out = igfsSecondary.create(fileRemote, 256, true, null, 1, 256, null);

    int rmtBlockSize = igfsSecondary.info(fileRemote).blockSize();

    out.write(new byte[rmtBlockSize]);
    out.close();

    // Start metrics measuring: all later checks are deltas against this snapshot.
    IgfsMetrics initMetrics = igfs.metrics();

    // Create empty file: must not move any block counters.
    igfs.create(file1, 256, true, null, 1, 256, null).close();

    int blockSize = igfs.info(file1).blockSize();

    checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 0, 0, 0);

    // Write two blocks to the file.
    IgfsOutputStream os = igfs.append(file1, false);
    os.write(new byte[blockSize * 2]);
    os.close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 2, 0, blockSize * 2);

    // Write one more file (one block).
    os = igfs.create(file2, 256, true, null, 1, 256, null);
    os.write(new byte[blockSize]);
    os.close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 3, 0, blockSize * 3);

    // Read data from the first file.
    IgfsInputStream is = igfs.open(file1);
    is.readFully(0, new byte[blockSize * 2]);
    is.close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 2, 0, blockSize * 2, 3, 0, blockSize * 3);

    // Read data from the second file with hits.
    is = igfs.open(file2);
    is.read(new byte[blockSize]);
    is.close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 3, 0, blockSize * 3, 3, 0, blockSize * 3);

    // Clear the first file: overwrite with an empty file, counters unchanged.
    igfs.create(file1, true).close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 3, 0, blockSize * 3, 3, 0, blockSize * 3);

    // Delete the second file: deletes must not affect read/write counters.
    igfs.delete(file2, false);

    checkBlockMetrics(initMetrics, igfs.metrics(), 3, 0, blockSize * 3, 3, 0, blockSize * 3);

    // Read remote file: counts as one total read and one remote (secondary) read.
    is = igfs.open(fileRemote);
    is.read(new byte[rmtBlockSize]);
    is.close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 4, 1, blockSize * 3 + rmtBlockSize, 3, 0, blockSize * 3);

    // Lets wait for blocks will be placed to cache
    U.sleep(300);

    // Read remote file again: now served from cache, so no new remote read.
    is = igfs.open(fileRemote);
    is.read(new byte[rmtBlockSize]);
    is.close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 5, 1, blockSize * 3 + rmtBlockSize * 2, 3, 0, blockSize * 3);

    IgfsMetrics metrics = igfs.metrics();

    assert metrics.secondarySpaceSize() == rmtBlockSize;

    // Write some data to the file working in DUAL mode.
    os = igfs.append(fileRemote, false);
    os.write(new byte[rmtBlockSize]);
    os.close();

    // Additional block read here due to file ending synchronization.
    checkBlockMetrics(initMetrics, igfs.metrics(), 5, 1, blockSize * 3 + rmtBlockSize * 2, 4, 1, blockSize * 3 + rmtBlockSize);

    metrics = igfs.metrics();

    assert metrics.secondarySpaceSize() == rmtBlockSize * 2;

    igfs.delete(fileRemote, false);

    U.sleep(300);

    assert igfs.metrics().secondarySpaceSize() == 0;

    // Write partial block to the first file.
    os = igfs.append(file1, false);
    os.write(new byte[blockSize / 2]);
    os.close();

    checkBlockMetrics(initMetrics, igfs.metrics(), 5, 1, blockSize * 3 + rmtBlockSize * 2, 5, 1, blockSize * 7 / 2 + rmtBlockSize);

    // Reset must zero every counter.
    igfs.resetMetrics();

    metrics = igfs.metrics();

    assert metrics.blocksReadTotal() == 0;
    assert metrics.blocksReadRemote() == 0;
    assert metrics.blocksWrittenTotal() == 0;
    assert metrics.blocksWrittenRemote() == 0;
    assert metrics.bytesRead() == 0;
    assert metrics.bytesReadTime() == 0;
    assert metrics.bytesWritten() == 0;
    assert metrics.bytesWriteTime() == 0;
}
Example use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project, from class IgfsProcessorSelfTest, method testSize.
/**
 * Ensure correct size calculation.
 * <p>
 * Writes three files of known sizes into a small directory tree, then verifies
 * that each file reports its own length and that every directory reports the
 * sum of the file sizes beneath it.
 *
 * @throws Exception If failed.
 */
public void testSize() throws Exception {
    IgfsPath dir1 = path("/dir1");
    IgfsPath subDir1 = path("/dir1/subdir1");
    IgfsPath dir2 = path("/dir2");

    IgfsPath fileDir1 = path("/dir1/file");
    IgfsPath fileSubdir1 = path("/dir1/subdir1/file");
    IgfsPath fileDir2 = path("/dir2/file");

    // Data-driven file population: one payload size per target path.
    IgfsPath[] targets = { fileDir1, fileSubdir1, fileDir2 };
    int[] payloadSizes = { 1000, 2000, 4000 };

    for (int i = 0; i < targets.length; i++) {
        IgfsOutputStream out = igfs.create(targets[i], false);

        out.write(new byte[payloadSizes[i]]);

        out.close();
    }

    // Each file reports exactly the number of bytes written to it.
    assert igfs.size(fileDir1) == 1000;
    assert igfs.size(fileSubdir1) == 2000;
    assert igfs.size(fileDir2) == 4000;

    // Each directory reports the total size of all files under it (recursively).
    assert igfs.size(dir1) == 3000;
    assert igfs.size(subDir1) == 2000;
    assert igfs.size(dir2) == 4000;
}
Aggregations