use of org.apache.cassandra.io.sstable.SSTableLoader in project cassandra by apache.
In the class BulkLoader, the method load:
public static void load(LoaderOptions options) throws BulkLoadException {
    DatabaseDescriptor.toolInitialization();
    OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
    SSTableLoader loader = new SSTableLoader(options.directory.getAbsoluteFile(),
                                             new ExternalClient(options.hosts,
                                                                options.nativePort,
                                                                options.authProvider,
                                                                options.storagePort,
                                                                options.sslStoragePort,
                                                                options.serverEncOptions,
                                                                buildSSLOptions(options.clientEncOptions)),
                                             handler,
                                             options.connectionsPerHost);
    DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(options.throttle);
    DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(options.interDcThrottle);
    StreamResultFuture future = null;
    ProgressIndicator indicator = new ProgressIndicator();
    try {
        if (options.noProgress) {
            future = loader.stream(options.ignores);
        } else {
            future = loader.stream(options.ignores, indicator);
        }
    } catch (Exception e) {
        JVMStabilityInspector.inspectThrowable(e);
        System.err.println(e.getMessage());
        if (e.getCause() != null) {
            System.err.println(e.getCause());
        }
        e.printStackTrace(System.err);
        throw new BulkLoadException(e);
    }
    try {
        future.get();
        if (!options.noProgress) {
            indicator.printSummary(options.connectionsPerHost);
        }
        // Give sockets time to gracefully close
        Thread.sleep(1000);
        // System.exit(0); // We need that to stop non daemonized threads
    } catch (Exception e) {
        System.err.println("Streaming to the following hosts failed:");
        System.err.println(loader.getFailedHosts());
        e.printStackTrace(System.err);
        throw new BulkLoadException(e);
    }
}
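For context, loader.stream() accepts optional listeners, and the ProgressIndicator above is one such listener. Below is a minimal sketch of an alternative listener, assuming the StreamEventHandler contract of recent Cassandra versions (handleStreamEvent plus the FutureCallback<StreamState> methods) and the public eventType/planId fields; it is illustrative only, not code from the project.

import org.apache.cassandra.streaming.StreamEvent;
import org.apache.cassandra.streaming.StreamEventHandler;
import org.apache.cassandra.streaming.StreamState;

// Hypothetical listener that could be passed as loader.stream(options.ignores, handler)
public class LoggingStreamHandler implements StreamEventHandler {
    @Override
    public void handleStreamEvent(StreamEvent event) {
        // Fired for session prepared / progress / complete events
        System.out.println("stream event: " + event.eventType);
    }

    @Override
    public void onSuccess(StreamState finalState) {
        System.out.println("streaming finished, plan " + finalState.planId);
    }

    @Override
    public void onFailure(Throwable t) {
        System.err.println("streaming failed: " + t);
    }
}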
use of org.apache.cassandra.io.sstable.SSTableLoader in project eiger by wlloyd.
In the class BulkLoader, the method main:
public static void main(String[] args) throws IOException {
    LoaderOptions options = LoaderOptions.parseArgs(args);
    try {
        SSTableLoader loader = new SSTableLoader(options.directory,
                                                 new ExternalClient(options, options.hosts, options.rpcPort),
                                                 options);
        SSTableLoader.LoaderFuture future = loader.stream(options.ignores);
        if (options.noProgress) {
            future.get();
        } else {
            ProgressIndicator indicator = new ProgressIndicator(future.getPendingFiles());
            indicator.start();
            System.out.println("");
            boolean printEnd = false;
            while (!future.isDone()) {
                if (indicator.printProgress()) {
                    // We're done with streaming
                    System.out.println("\nWaiting for targets to rebuild indexes ...");
                    printEnd = true;
                    future.get();
                    assert future.isDone();
                } else {
                    try {
                        Thread.sleep(1000L);
                    } catch (Exception e) {
                    }
                }
            }
            if (!printEnd)
                indicator.printProgress();
        }
        // We need that to stop non daemonized threads
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
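Both versions of the tool derive keyspace and table names from the directory being loaded, so the SSTables must sit under a path ending in <keyspace>/<table>. Below is a small sketch of preparing such a staging directory before streaming; the class, paths, and names are hypothetical.

import java.io.File;

public class PrepareStagingDir {
    public static void main(String[] args) {
        File staging = new File("/tmp/bulkload");                  // hypothetical staging area
        File tableDir = new File(staging, "my_keyspace/my_table"); // loader derives keyspace/table from this path
        if (!tableDir.isDirectory() && !tableDir.mkdirs())
            throw new IllegalStateException("could not create " + tableDir);
        // copy the SSTable components (*-Data.db, *-Index.db, ...) into tableDir,
        // then point SSTableLoader (or bin/sstableloader) at tableDir
    }
}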
use of org.apache.cassandra.io.sstable.SSTableLoader in project eiger by wlloyd.
In the class StorageService, the method bulkLoad:
@Override
public void bulkLoad(String directory) {
    File dir = new File(directory);
    if (!dir.exists() || !dir.isDirectory())
        throw new IllegalArgumentException("Invalid directory " + directory);
    SSTableLoader.Client client = new SSTableLoader.Client() {
        @Override
        public void init(String keyspace) {
            for (Map.Entry<Range<Token>, List<InetAddress>> entry : StorageService.instance.getRangeToAddressMap(keyspace).entrySet()) {
                Range<Token> range = entry.getKey();
                for (InetAddress endpoint : entry.getValue())
                    addRangeForEndpoint(range, endpoint);
            }
        }

        @Override
        public boolean validateColumnFamily(String keyspace, String cfName) {
            return Schema.instance.getCFMetaData(keyspace, cfName) != null;
        }
    };
    SSTableLoader.OutputHandler oh = new SSTableLoader.OutputHandler() {
        @Override
        public void output(String msg) {
            logger_.info(msg);
        }

        @Override
        public void debug(String msg) {
            logger_.debug(msg);
        }
    };
    SSTableLoader loader = new SSTableLoader(dir, client, oh);
    try {
        loader.stream().get();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
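Because this method overrides an MBean operation on StorageService, it can also be triggered remotely over JMX. Below is a minimal sketch, assuming the default Cassandra JMX endpoint on port 7199 and the standard org.apache.cassandra.db:type=StorageService MBean name; the class and directory path are hypothetical.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class BulkLoadViaJmx {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            ObjectName name = new ObjectName("org.apache.cassandra.db:type=StorageService");
            // Invoke the operation by name rather than binding to the MBean interface
            mbs.invoke(name, "bulkLoad",
                       new Object[] { "/var/lib/cassandra/staging/my_keyspace/my_table" },
                       new String[] { "java.lang.String" });
        }
    }
}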
use of org.apache.cassandra.io.sstable.SSTableLoader in project cassandra by apache.
In the class CqlBulkRecordWriter, the method prepareWriter:
private void prepareWriter() throws IOException {
    if (writer == null) {
        writer = CQLSSTableWriter.builder()
                                 .forTable(schema)
                                 .using(insertStatement)
                                 .withPartitioner(ConfigHelper.getOutputPartitioner(conf))
                                 .inDirectory(outputDir)
                                 .withBufferSizeInMB(Integer.parseInt(conf.get(BUFFER_SIZE_IN_MB, "64")))
                                 .withPartitioner(partitioner)
                                 .build();
    }
    if (loader == null) {
        ExternalClient externalClient = new ExternalClient(conf);
        externalClient.setTableMetadata(TableMetadataRef.forOfflineTools(CreateTableStatement.parse(schema, keyspace).build()));
        loader = new SSTableLoader(outputDir, externalClient, new NullOutputHandler()) {
            @Override
            public void onSuccess(StreamState finalState) {
                if (deleteSrc)
                    FileUtils.deleteRecursive(outputDir);
            }
        };
    }
}
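CQLSSTableWriter is a public offline-writer API, so the same builder pattern works outside of Hadoop as well. Below is a minimal sketch with a hypothetical schema and output directory; the builder calls mirror the ones above.

import java.io.File;
import org.apache.cassandra.io.sstable.CQLSSTableWriter;

public class WriterSketch {
    public static void main(String[] args) throws Exception {
        String schema = "CREATE TABLE my_keyspace.my_table (id int PRIMARY KEY, value text)";
        String insert = "INSERT INTO my_keyspace.my_table (id, value) VALUES (?, ?)";
        CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                                  .inDirectory(new File("/tmp/bulkload/my_keyspace/my_table"))
                                                  .forTable(schema)
                                                  .using(insert)
                                                  .build();
        writer.addRow(1, "hello");   // values bound in the order of the INSERT statement
        writer.close();              // flushes the final SSTable to the output directory
    }
}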
use of org.apache.cassandra.io.sstable.SSTableLoader in project cassandra by apache.
In the class StorageService, the method bulkLoadInternal:
private StreamResultFuture bulkLoadInternal(String directory) {
    File dir = new File(directory);
    if (!dir.exists() || !dir.isDirectory())
        throw new IllegalArgumentException("Invalid directory " + directory);
    SSTableLoader.Client client = new SSTableLoader.Client() {
        private String keyspace;

        public void init(String keyspace) {
            this.keyspace = keyspace;
            try {
                for (Map.Entry<Range<Token>, List<InetAddress>> entry : StorageService.instance.getRangeToAddressMap(keyspace).entrySet()) {
                    Range<Token> range = entry.getKey();
                    for (InetAddress endpoint : entry.getValue())
                        addRangeForEndpoint(range, endpoint);
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        public TableMetadataRef getTableMetadata(String tableName) {
            return Schema.instance.getTableMetadataRef(keyspace, tableName);
        }
    };
    return new SSTableLoader(dir, client, new OutputHandler.LogOutput()).stream();
}
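For reference, the surrounding class typically wraps this helper in both a blocking and an asynchronous entry point. The sketch below shows that pattern, blocking on the returned StreamResultFuture in one case and returning its plan id in the other; verify the exact method names and signatures against the Cassandra version in use.

public void bulkLoad(String directory) {
    try {
        // block until streaming completes
        bulkLoadInternal(directory).get();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

public String bulkLoadAsync(String directory) {
    // return immediately with the plan id of the streaming operation
    return bulkLoadInternal(directory).planId.toString();
}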