Use of uk.gov.gchq.gaffer.parquetstore.operation.handler.utilities.AggregateAndSortData in project Gaffer by gchq.
The class AddElementsHandler, method addElements:
private void addElements(final AddElements addElementsOperation, final Context context, final ParquetStore store) throws OperationException {
    // Set up
    final FileSystem fs = store.getFS();
    final Schema schema = store.getSchema();
    final SchemaUtils schemaUtils = store.getSchemaUtils();
    final SparkSession spark = SparkContextUtil.getSparkSession(context, store.getProperties());
    final ExecutorService threadPool = createThreadPool(spark, store.getProperties());
    final GraphPartitioner currentGraphPartitioner = store.getGraphPartitioner();
    SparkParquetUtils.configureSparkForAddElements(spark, store.getProperties());
    // Write data from addElementsOperation split by group and partition (NB this uses the existing partitioner -
    // adding elements using this operation does not affect the partitions).
    final String tmpDirectory = store.getTempFilesDir();
    final BiFunction<String, Integer, String> directoryForGroupAndPartitionId = (group, partitionId) -> tmpDirectory + "/unsorted_unaggregated_new" + "/group=" + group + "/partition=" + partitionId;
    final BiFunction<String, Integer, String> directoryForGroupAndPartitionIdForReversedEdges = (group, partitionId) -> tmpDirectory + "/unsorted_unaggregated_new" + "/reversed-group=" + group + "/partition=" + partitionId;
    LOGGER.info("Calling WriteUnsortedData to add elements");
    LOGGER.trace("currentGraphPartitioner is {}", currentGraphPartitioner);
    new WriteUnsortedData(store, currentGraphPartitioner, directoryForGroupAndPartitionId, directoryForGroupAndPartitionIdForReversedEdges).writeElements(addElementsOperation.getInput());
    // For every group and partition, aggregate the new data with the old data and then sort
    final BiFunction<String, Integer, String> directoryForSortedResultsForGroupAndPartitionId = (group, partitionId) -> tmpDirectory + "/sorted_new_old_merged" + "/group=" + group + "/partition=" + partitionId;
    final BiFunction<String, Integer, String> directoryForSortedResultsForGroupAndPartitionIdForReversedEdges = (group, partitionId) -> tmpDirectory + "/sorted_new_old_merged" + "/reversed-group=" + group + "/partition=" + partitionId;
    final List<Callable<CallableResult>> tasks = new ArrayList<>();
    for (final String group : schema.getGroups()) {
        final List<Partition> partitions = currentGraphPartitioner.getGroupPartitioner(group).getPartitions();
        for (final Partition partition : partitions) {
            final List<String> inputFiles = new ArrayList<>();
            // New data
            inputFiles.add(directoryForGroupAndPartitionId.apply(group, partition.getPartitionId()));
            // Old data
            inputFiles.add(store.getFile(group, partition));
            final String outputDir = directoryForSortedResultsForGroupAndPartitionId.apply(group, partition.getPartitionId());
            final AggregateAndSortData task = new AggregateAndSortData(schemaUtils, fs, inputFiles, outputDir, group, group + "-" + partition.getPartitionId(), false, store.getProperties().getCompressionCodecName(), spark);
            tasks.add(task);
            LOGGER.info("Created AggregateAndSortData task for group {}, partition {}", group, partition.getPartitionId());
        }
    }
    for (final String group : schema.getEdgeGroups()) {
        final List<Partition> partitions = currentGraphPartitioner.getGroupPartitionerForReversedEdges(group).getPartitions();
        for (final Partition partition : partitions) {
            final List<String> inputFiles = new ArrayList<>();
            // New data
            inputFiles.add(directoryForGroupAndPartitionIdForReversedEdges.apply(group, partition.getPartitionId()));
            // Old data
            inputFiles.add(store.getFileForReversedEdges(group, partition));
            final String outputDir = directoryForSortedResultsForGroupAndPartitionIdForReversedEdges.apply(group, partition.getPartitionId());
            final AggregateAndSortData task = new AggregateAndSortData(schemaUtils, fs, inputFiles, outputDir, group, "reversed-" + group + "-" + partition.getPartitionId(), true, store.getProperties().getCompressionCodecName(), spark);
            tasks.add(task);
            LOGGER.info("Created AggregateAndSortData task for reversed edge group {}, partition {}", group, partition.getPartitionId());
        }
    }
    try {
        LOGGER.info("Invoking {} AggregateAndSortData tasks", tasks.size());
        final List<Future<CallableResult>> futures = threadPool.invokeAll(tasks);
        for (final Future<CallableResult> future : futures) {
            final CallableResult result = future.get();
            LOGGER.info("Result {} from task", result);
        }
    } catch (final InterruptedException e) {
        throw new OperationException("InterruptedException running AggregateAndSortData tasks", e);
    } catch (final ExecutionException e) {
        throw new OperationException("ExecutionException running AggregateAndSortData tasks", e);
    }
    try {
        // Move results to a new snapshot directory (the -tmp at the end allows us to add data to the directory,
        // and then when this is all finished we rename the directory to remove the -tmp; this allows us to make
        // the replacement of the old data with the new data an atomic operation and ensures that a get operation
        // against the store will not read the directory when only some of the data has been moved there).
        final long snapshot = System.currentTimeMillis();
        final String newDataDir = store.getDataDir() + "/" + ParquetStore.getSnapshotPath(snapshot) + "-tmp";
        LOGGER.info("Moving aggregated and sorted data to new snapshot directory {}", newDataDir);
        fs.mkdirs(new Path(newDataDir));
        for (final String group : schema.getGroups()) {
            final Path groupDir = new Path(newDataDir, ParquetStore.getGroupSubDir(group, false));
            fs.mkdirs(groupDir);
            LOGGER.info("Created directory {}", groupDir);
        }
        for (final String group : schema.getEdgeGroups()) {
            final Path groupDir = new Path(newDataDir, ParquetStore.getGroupSubDir(group, true));
            fs.mkdirs(groupDir);
            LOGGER.info("Created directory {}", groupDir);
        }
        for (final String group : schema.getGroups()) {
            final String groupDir = newDataDir + "/" + ParquetStore.getGroupSubDir(group, false);
            final List<Partition> partitions = currentGraphPartitioner.getGroupPartitioner(group).getPartitions();
            for (final Partition partition : partitions) {
                final Path outputDir = new Path(directoryForSortedResultsForGroupAndPartitionId.apply(group, partition.getPartitionId()));
                if (!fs.exists(outputDir)) {
                    LOGGER.info("Not moving data for group {}, partition id {} as the outputDir {} does not exist", group, partition.getPartitionId(), outputDir);
                } else {
                    // One .parquet file and one .parquet.crc file
                    final FileStatus[] status = fs.listStatus(outputDir, path -> path.getName().endsWith(".parquet"));
                    if (1 != status.length) {
                        LOGGER.error("Didn't find one Parquet file in path {} (found {} files)", outputDir, status.length);
                        throw new OperationException("Expected to find one Parquet file in path " + outputDir + " (found " + status.length + " files)");
                    } else {
                        final Path destination = new Path(groupDir, ParquetStore.getFile(partition.getPartitionId()));
                        LOGGER.info("Renaming {} to {}", status[0].getPath(), destination);
                        fs.rename(status[0].getPath(), destination);
                    }
                }
            }
        }
        for (final String group : schema.getEdgeGroups()) {
            final String groupDir = newDataDir + "/" + ParquetStore.getGroupSubDir(group, true);
            final List<Partition> partitions = currentGraphPartitioner.getGroupPartitionerForReversedEdges(group).getPartitions();
            for (final Partition partition : partitions) {
                final Path outputDir = new Path(directoryForSortedResultsForGroupAndPartitionIdForReversedEdges.apply(group, partition.getPartitionId()));
                if (!fs.exists(outputDir)) {
                    LOGGER.info("Not moving data for reversed edge group {}, partition id {} as the outputDir {} does not exist", group, partition.getPartitionId(), outputDir);
                } else {
                    // One .parquet file and one .parquet.crc file
                    final FileStatus[] status = fs.listStatus(outputDir, path -> path.getName().endsWith(".parquet"));
                    if (1 != status.length) {
                        LOGGER.error("Didn't find one Parquet file in path {} (found {} files)", outputDir, status.length);
                        throw new OperationException("Expected to find one Parquet file in path " + outputDir + " (found " + status.length + " files)");
                    } else {
                        final Path destination = new Path(groupDir, ParquetStore.getFile(partition.getPartitionId()));
                        LOGGER.info("Renaming {} to {}", status[0].getPath(), destination);
                        fs.rename(status[0].getPath(), destination);
                    }
                }
            }
        }
        // Delete temporary data directory
        LOGGER.info("Deleting temporary directory {}", tmpDirectory);
        fs.delete(new Path(tmpDirectory), true);
        // Write out graph partitioner (unchanged from previous one)
        final Path newGraphPartitionerPath = new Path(newDataDir + "/graphPartitioner");
        LOGGER.info("Writing graph partitioner to {}", newGraphPartitionerPath);
        try (final FSDataOutputStream stream = fs.create(newGraphPartitionerPath)) {
            new GraphPartitionerSerialiser().write(currentGraphPartitioner, stream);
        }
        // Move snapshot-tmp directory to snapshot
        final String directoryWithoutTmp = newDataDir.substring(0, newDataDir.lastIndexOf("-tmp"));
        LOGGER.info("Renaming {} to {}", newDataDir, directoryWithoutTmp);
        fs.rename(new Path(newDataDir), new Path(directoryWithoutTmp));
        // Set snapshot on store to new value
        LOGGER.info("Updating latest snapshot on store to {}", snapshot);
        store.setLatestSnapshot(snapshot);
    } catch (final IOException | StoreException e) {
        throw new OperationException("IOException moving results files into new snapshot directory", e);
    }
}
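
For context, this handler runs when an AddElements operation is executed against a graph backed by a ParquetStore. The sketch below shows that call path; it is a minimal illustration, not taken from the Gaffer source: the graph id, schema file name, directory paths, group name and vertex are all assumptions, and the ParquetStoreProperties setters stand in for a properties file.

import uk.gov.gchq.gaffer.data.element.Entity;
import uk.gov.gchq.gaffer.graph.Graph;
import uk.gov.gchq.gaffer.graph.GraphConfig;
import uk.gov.gchq.gaffer.operation.OperationException;
import uk.gov.gchq.gaffer.operation.impl.add.AddElements;
import uk.gov.gchq.gaffer.parquetstore.ParquetStoreProperties;
import uk.gov.gchq.gaffer.store.schema.Schema;
import uk.gov.gchq.gaffer.user.User;
import java.nio.file.Paths;

public class AddElementsExample {
    public static void main(final String[] args) throws OperationException {
        // Illustrative store configuration: the data and temp directories are assumed paths.
        final ParquetStoreProperties properties = new ParquetStoreProperties();
        properties.setDataDir("/tmp/parquet-store/data");
        properties.setTempFilesDir("/tmp/parquet-store/tmp");
        final Graph graph = new Graph.Builder()
                .config(new GraphConfig.Builder().graphId("example").build())
                .addSchema(Schema.fromJson(Paths.get("schema.json"))) // assumed schema file
                .storeProperties(properties)
                .build();
        // Executing AddElements routes to AddElementsHandler.addElements(...) shown above.
        graph.execute(new AddElements.Builder()
                .input(new Entity.Builder().group("BasicEntity").vertex(1L).build())
                .build(), new User());
    }
}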
Use of uk.gov.gchq.gaffer.parquetstore.operation.handler.utilities.AggregateAndSortData in project Gaffer by gchq.
The class AggregateAndSortDataTest, method test:
@Test
public void test(@TempDir java.nio.file.Path tempDir) throws Exception {
    // Given
    final FileSystem fs = FileSystem.get(new Configuration());
    final SchemaUtils schemaUtils = new SchemaUtils(TestUtils.gafferSchema("schemaUsingLongVertexType"));
    final String file1 = tempDir.resolve("inputdata1.parquet").toString();
    final String file2 = tempDir.resolve("inputdata2.parquet").toString();
    writeData(file1, schemaUtils);
    writeData(file2, schemaUtils);
    final SparkSession sparkSession = SparkSessionProvider.getSparkSession();
    final List<String> inputFiles = new ArrayList<>(Sets.newHashSet(file1, file2));
    final String outputFolder = tempDir.resolve("aggregated").toString();
    // When
    new AggregateAndSortData(schemaUtils, fs, inputFiles, outputFolder, TestGroups.ENTITY, "test", false, CompressionCodecName.GZIP, sparkSession).call();
    // Then
    assertTrue(fs.exists(new Path(outputFolder)));
    final Row[] results = (Row[]) sparkSession.read().parquet(outputFolder).collect();
    // Should be sorted by vertex and date
    for (int i = 0; i < 40; i++) {
        assertEquals((long) i / 2, (long) results[i].getAs(ParquetStore.VERTEX));
        assertEquals(i % 2 == 0 ? 'b' : 'a', ((byte[]) results[i].getAs("byte"))[0]);
        assertEquals(i % 2 == 0 ? 8f : 6f, results[i].getAs("float"), 0.01f);
        assertEquals(11L * 2 * (i / 2), (long) results[i].getAs("long"));
        assertEquals(i % 2 == 0 ? 14 : 12, (int) results[i].getAs("short"));
        assertEquals(i % 2 == 0 ? 100000L : 200000L, (long) results[i].getAs("date"));
        assertEquals(2, (int) results[i].getAs("count"));
        assertArrayEquals(i % 2 == 0 ? new String[] { "A", "C" } : new String[] { "A", "B" }, (String[]) ((WrappedArray<String>) results[i].getAs("treeSet")).array());
        final FreqMap mergedFreqMap1 = new FreqMap();
        mergedFreqMap1.put("A", 2L);
        mergedFreqMap1.put("B", 2L);
        final FreqMap mergedFreqMap2 = new FreqMap();
        mergedFreqMap2.put("A", 2L);
        mergedFreqMap2.put("C", 2L);
        assertEquals(JavaConversions$.MODULE$.mapAsScalaMap(i % 2 == 0 ? mergedFreqMap2 : mergedFreqMap1), results[i].getAs("freqMap"));
    }
}
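
The assertions above reflect pairwise aggregation of the two identical input files: each vertex appears twice (sorted by vertex, then date), with the count, long and FreqMap values doubled relative to a single file. Below is a minimal sketch of the FreqMap merge the test expects, assuming each input file contributed a frequency of 1 per key; FreqMapAggregator is Gaffer's stock binary operator for this type.

import uk.gov.gchq.gaffer.types.FreqMap;
import uk.gov.gchq.gaffer.types.function.FreqMapAggregator;

public class FreqMapMergeExample {
    public static void main(final String[] args) {
        // One FreqMap per input file; each key is assumed to have frequency 1 in each file.
        final FreqMap fromFile1 = new FreqMap();
        fromFile1.upsert("A");
        fromFile1.upsert("B");
        final FreqMap fromFile2 = new FreqMap();
        fromFile2.upsert("A");
        fromFile2.upsert("B");
        // Pairwise merge, as applied to matching rows during AggregateAndSortData.
        final FreqMap merged = new FreqMapAggregator().apply(fromFile1, fromFile2);
        System.out.println(merged); // {A=2, B=2}, matching mergedFreqMap1 in the test
    }
}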