Use of org.apache.accumulo.tserver.log.MultiReader in project accumulo by Apache.
The example below is the main method of the LogReader class. It uses MultiReader when the given path is a directory of sorted log (map) files rather than a single write-ahead log file.
/**
 * Dump a Log File (Map or Sequence) to stdout. Will read from HDFS or local file system.
 *
 * @param args
 *          - first argument is the file to print
 */
public static void main(String[] args) throws IOException {
  Opts opts = new Opts();
  opts.parseArgs(LogReader.class.getName(), args);
  VolumeManager fs = VolumeManagerImpl.get();

  Matcher rowMatcher = null;
  KeyExtent ke = null;
  Text row = null;
  if (opts.files.isEmpty()) {
    new JCommander(opts).usage();
    return;
  }
  if (opts.row != null)
    row = new Text(opts.row);
  if (opts.extent != null) {
    // extent is given as tableId;endRow;prevEndRow
    String[] sa = opts.extent.split(";");
    ke = new KeyExtent(Table.ID.of(sa[0]), new Text(sa[1]), new Text(sa[2]));
  }
  if (opts.regexp != null) {
    Pattern pattern = Pattern.compile(opts.regexp);
    rowMatcher = pattern.matcher("");
  }

  Set<Integer> tabletIds = new HashSet<>();

  for (String file : opts.files) {
    Path path = new Path(file);
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();

    if (fs.isFile(path)) {
      try (final FSDataInputStream fsinput = fs.open(path)) {
        // read log entries from a simple hdfs file
        DFSLoggerInputStreams streams;
        try {
          streams = DfsLogger.readHeaderAndReturnStream(fsinput, SiteConfiguration.getInstance());
        } catch (LogHeaderIncompleteException e) {
          log.warn("Could not read header for {} . Ignoring...", path);
          continue;
        }

        try (DataInputStream input = streams.getDecryptingInputStream()) {
          // read key/value pairs until the stream is exhausted
          while (true) {
            try {
              key.readFields(input);
              value.readFields(input);
            } catch (EOFException ex) {
              break;
            }
            printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
          }
        }
      }
    } else {
      // read the log entries sorted in a map file
      MultiReader input = new MultiReader(fs, path);
      while (input.next(key, value)) {
        printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
      }
    }
  }
}
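
For comparison, below is a minimal standalone sketch of the MultiReader read loop used in the else branch above. It assumes the same Accumulo 1.x tserver classes; the import package names, the SortedLogDump class name, the directory argument, and the availability of MultiReader.close() are assumptions for illustration, not taken from the listing above.

import java.io.IOException;

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.accumulo.tserver.log.MultiReader;
import org.apache.accumulo.tserver.logger.LogFileKey;
import org.apache.accumulo.tserver.logger.LogFileValue;
import org.apache.hadoop.fs.Path;

public class SortedLogDump {
  public static void main(String[] args) throws IOException {
    // args[0] should point at a directory of sorted log (map) files,
    // e.g. a recovered write-ahead log; the path is supplied by the caller.
    VolumeManager fs = VolumeManagerImpl.get();
    MultiReader reader = new MultiReader(fs, new Path(args[0]));
    try {
      LogFileKey key = new LogFileKey();
      LogFileValue value = new LogFileValue();
      // next() fills key and value with successive entries merged across the
      // underlying map files, returning false once all entries are consumed.
      while (reader.next(key, value)) {
        System.out.println(key + " " + value);
      }
    } finally {
      reader.close();
    }
  }
}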