Example use of org.apache.hyracks.api.io.FileSplit in project asterixdb (Apache):
class SplitsAndConstraintsUtil, method getIndexSplits.
/**
 * Computes the file splits for an index of the given dataset across the supplied nodes.
 * For each node, one split is created per cluster partition on that node, pointing at
 * 'storage dir name'/partition_#/dataverse/dataset_idx_index (under a temp-datasets
 * subfolder for temporary datasets).
 *
 * @param dataset   the dataset whose index splits are requested
 * @param indexName the name of the index
 * @param nodes     the node names to produce splits for
 * @return one FileSplit per (node, partition) pair
 */
public static FileSplit[] getIndexSplits(Dataset dataset, String indexName, List<String> nodes) {
    File relPathFile = new File(StoragePathUtil.prepareDataverseIndexName(dataset.getDataverseName(),
            dataset.getDatasetName(), indexName, dataset.getRebalanceCount()));
    String storageDirName = ClusterProperties.INSTANCE.getStorageDirectoryName();
    List<FileSplit> splits = new ArrayList<>();
    for (String nd : nodes) {
        int numPartitions = ClusterStateManager.INSTANCE.getNodePartitionsCount(nd);
        ClusterPartition[] nodePartitions = ClusterStateManager.INSTANCE.getNodePartitions(nd);
        // Metadata datasets are confined to a single partition. Currently this branch is
        // never taken since the metadata node group doesn't exist.
        // Use constant-first equals() instead of compareTo(...) == 0: it expresses equality
        // directly and is null-safe if getNodeGroupName() ever returns null.
        if (MetadataConstants.METADATA_NODEGROUP_NAME.equals(dataset.getNodeGroupName())) {
            numPartitions = 1;
        }
        for (int k = 0; k < numPartitions; k++) {
            // format: 'storage dir name'/partition_#/dataverse/dataset_idx_index
            File f = new File(StoragePathUtil.prepareStoragePartitionPath(storageDirName,
                    nodePartitions[k].getPartitionId())
                    + (dataset.isTemp() ? (File.separator + StoragePathUtil.TEMP_DATASETS_STORAGE_FOLDER) : "")
                    + File.separator + relPathFile);
            splits.add(StoragePathUtil.getFileSplitForClusterPartition(nodePartitions[k], f.getPath()));
        }
    }
    // Zero-length array argument is the idiomatic (and on modern JVMs fastest) toArray form.
    return splits.toArray(new FileSplit[0]);
}
Example use of org.apache.hyracks.api.io.FileSplit in project asterixdb (Apache):
class IndexDataflowHelperFactory, method create.
/**
 * Creates an index dataflow helper for the given partition by resolving the
 * partition's file split into a file reference via the task's I/O manager.
 *
 * @param ctx       the Hyracks task context
 * @param partition the partition index into the file split provider
 * @return a new IndexDataflowHelper bound to the resolved resource
 * @throws HyracksDataException if the file reference cannot be resolved
 */
@Override
public IIndexDataflowHelper create(IHyracksTaskContext ctx, int partition) throws HyracksDataException {
    final FileSplit split = fileSplitProvider.getFileSplits()[partition];
    final FileReference resource = split.getFileReference(ctx.getIoManager());
    final INCServiceContext serviceCtx = ctx.getJobletContext().getServiceContext();
    return new IndexDataflowHelper(serviceCtx, storageMgr, resource);
}
Example use of org.apache.hyracks.api.io.FileSplit in project asterixdb (Apache):
class PigletMetadataProvider, method getWriteFileRuntime.
/**
 * Builds the push runtime and partition constraint for writing to a Piglet file sink.
 * The constraint pins the writer to the node names carried by the sink's file splits;
 * the writer itself targets the first split's file.
 *
 * @param sink             the data sink (must be a PigletFileDataSink)
 * @param printColumns     indices of the columns to print
 * @param printerFactories printer factories, one per printed column
 * @param inputDesc        record descriptor of the incoming data
 * @return the writer runtime factory paired with its absolute partition constraint
 * @throws AlgebricksException if the writer runtime cannot be constructed
 */
@Override
public Pair<IPushRuntimeFactory, AlgebricksPartitionConstraint> getWriteFileRuntime(IDataSink sink, int[] printColumns, IPrinterFactory[] printerFactories, RecordDescriptor inputDesc) throws AlgebricksException {
    final PigletFileDataSink dataSink = (PigletFileDataSink) sink;
    final FileSplit[] splits = dataSink.getFileSplits();
    // One location entry per split: the node each split lives on.
    final String[] nodeNames = new String[splits.length];
    for (int idx = 0; idx < splits.length; idx++) {
        nodeNames[idx] = splits[idx].getNodeName();
    }
    try {
        // The writer targets the first split; null I/O manager resolves the raw file.
        final IPushRuntimeFactory writerFactory = new SinkWriterRuntimeFactory(printColumns, printerFactories,
                splits[0].getFile(null), PrinterBasedWriterFactory.INSTANCE, inputDesc);
        return new Pair<>(writerFactory, new AlgebricksAbsolutePartitionConstraint(nodeNames));
    } catch (HyracksDataException e) {
        // Preserve the cause so callers see the underlying I/O failure.
        throw new AlgebricksException(e);
    }
}
Example use of org.apache.hyracks.api.io.FileSplit in project asterixdb (Apache):
class AbstractBTreeOperatorTest, method setup.
/**
 * Test fixture setup: creates the test helper, then builds split providers and
 * dataflow helper factories for the primary and secondary indexes, each backed
 * by a single managed file split on node NC1.
 *
 * @throws Exception if the test helper cannot be created
 */
@Before
public void setup() throws Exception {
    testHelper = createTestHelper();

    // Primary index: one managed split on NC1.
    final String primaryName = testHelper.getPrimaryIndexName();
    primarySplitProvider =
            new ConstantFileSplitProvider(new FileSplit[] { new ManagedFileSplit(NC1_ID, primaryName) });
    primaryHelperFactory = new IndexDataflowHelperFactory(storageManager, primarySplitProvider);

    // Secondary index: same layout, different file.
    final String secondaryName = testHelper.getSecondaryIndexName();
    secondarySplitProvider =
            new ConstantFileSplitProvider(new FileSplit[] { new ManagedFileSplit(NC1_ID, secondaryName) });
    secondaryHelperFactory = new IndexDataflowHelperFactory(storageManager, secondarySplitProvider);
}
Example use of org.apache.hyracks.api.io.FileSplit in project asterixdb (Apache):
class JobHelper, method createFileSplitProvider.
/**
 * Builds a constant file split provider with one managed split per node,
 * naming each split's file "&lt;btreeFileName&gt;.&lt;nodeName&gt;".
 *
 * @param splitNCs      the node controller names to create splits for
 * @param btreeFileName the base file name shared by all splits
 * @return a provider over the per-node splits
 */
public static IFileSplitProvider createFileSplitProvider(String[] splitNCs, String btreeFileName) {
    final FileSplit[] splits = new FileSplit[splitNCs.length];
    int i = 0;
    for (String nc : splitNCs) {
        // Suffix the shared base name with the node name so each node gets its own file.
        splits[i++] = new ManagedFileSplit(nc, btreeFileName + "." + nc);
    }
    return new ConstantFileSplitProvider(splits);
}
Aggregations