Use of java.util.Scanner in project camel by apache.
In class BindyKeyValuePairDataFormat, method unmarshal.
public Object unmarshal(Exchange exchange, InputStream inputStream) throws Exception {
    BindyKeyValuePairFactory factory = (BindyKeyValuePairFactory) getFactory();
    // List of POJOs
    List<Map<String, Object>> models = new ArrayList<Map<String, Object>>();
    // POJOs of the model
    Map<String, Object> model;
    // Map to hold the model @OneToMany classes while binding
    Map<String, List<Object>> lists = new HashMap<String, List<Object>>();
    InputStreamReader in = new InputStreamReader(inputStream, IOHelper.getCharsetName(exchange));
    // Scanner is used so that big files can be read line by line
    Scanner scanner = new Scanner(in);
    // Retrieve the pair separator defined to split the record
    ObjectHelper.notNull(factory.getPairSeparator(), "The pair separator property of the annotation @Message");
    String separator = factory.getPairSeparator();
    int count = 0;
    try {
        while (scanner.hasNextLine()) {
            // Read the line
            String line = scanner.nextLine().trim();
            if (ObjectHelper.isEmpty(line)) {
                // Skip empty lines
                continue;
            }
            // Increment counter
            count++;
            // Create POJO
            model = factory.factory();
            // Split the message according to the pair separator defined in
            // the annotated class @Message
            List<String> result = Arrays.asList(line.split(separator));
            if (result.isEmpty()) {
                throw new IllegalArgumentException("No records have been defined in the KVP");
            }
            // Bind data from the message to the model classes;
            // the counter is used to report the line where an error occurs
            factory.bind(result, model, count, lists);
            // Link objects together
            factory.link(model);
            // Add the object graph to the list
            models.add(model);
            LOG.debug("Graph of objects created: {}", model);
        }
        // An empty list corresponds to an empty stream, ...
        if (models.isEmpty()) {
            throw new IllegalArgumentException("No records have been defined in the key value pair stream");
        } else {
            return extractUnmarshalResult(models);
        }
    } finally {
        scanner.close();
        IOHelper.close(in, "in", LOG);
    }
}
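Stripped of the Bindy specifics, the pattern worth noting here is driving a Scanner over a character stream so that a large payload is consumed line by line instead of being loaded into memory at once. A minimal standalone sketch of that pattern (the class name and the printing are illustrative, not part of the Camel code above):

import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;

public final class LineStreamExample {
    // Reads a (possibly large) stream line by line; blank lines are skipped,
    // mirroring the loop in BindyKeyValuePairDataFormat.unmarshal above.
    static void process(InputStream in) {
        try (Scanner scanner = new Scanner(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            int count = 0;
            while (scanner.hasNextLine()) {
                String line = scanner.nextLine().trim();
                if (line.isEmpty()) {
                    continue;
                }
                count++;
                System.out.println(count + ": " + line);
            }
        }
    }
}

Using try-with-resources here also closes the underlying reader, which the Camel method does explicitly in its finally block instead.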
Use of java.util.Scanner in project flink by apache.
In class WebRuntimeMonitorITCase, method testNoCopyFromJar.
/**
 * Files are copied from the flink-dist jar to a temporary directory and
 * then served from there. Only files from <code>flink-dist.jar:/web</code>
 * may be copied.
 */
@Test
public void testNoCopyFromJar() throws Exception {
    final Deadline deadline = TestTimeout.fromNow();
    TestingCluster flink = null;
    WebRuntimeMonitor webMonitor = null;
    try {
        flink = new TestingCluster(new Configuration());
        flink.start(true);
        webMonitor = startWebRuntimeMonitor(flink);
        try (HttpTestClient client = new HttpTestClient("localhost", webMonitor.getServerPort())) {
            String expectedIndex = new Scanner(new File(MAIN_RESOURCES_PATH + "/index.html")).useDelimiter("\\A").next();
            // 1) Request index.html from the web server
            client.sendGetRequest("index.html", deadline.timeLeft());
            HttpTestClient.SimpleHttpResponse response = client.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("html"));
            assertEquals(expectedIndex, response.getContent());
            // 2) Request a file via the class loader
            client.sendGetRequest("../log4j-test.properties", deadline.timeLeft());
            response = client.getNextResponse(deadline.timeLeft());
            assertEquals("Returned status code " + response.getStatus() + " for file outside of web root.", HttpResponseStatus.NOT_FOUND, response.getStatus());
            assertFalse("Did not respond with the file, but still copied it from the JAR.", new File(webMonitor.getBaseDir(new Configuration()), "log4j-test.properties").exists());
            // 3) Request a non-existing file
            client.sendGetRequest("not-existing-resource", deadline.timeLeft());
            response = client.getNextResponse(deadline.timeLeft());
            assertEquals("Unexpected status code " + response.getStatus() + " for file outside of web root.", HttpResponseStatus.NOT_FOUND, response.getStatus());
        }
    } finally {
        if (flink != null) {
            flink.shutdown();
        }
        if (webMonitor != null) {
            webMonitor.stop();
        }
    }
}
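The expression new Scanner(file).useDelimiter("\\A").next() is the classic Scanner idiom for reading a whole file into one String: \A is a regex anchor that matches only the beginning of input, so the scanner never finds a second delimiter and the first token is the entire file content. A sketch of the same idiom with explicit charset and resource handling, which the test omits (class and method names are illustrative):

import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;

public final class SlurpExample {
    // Reads an entire file into a single String. "\\A" anchors at the start
    // of input, so the first (and only) token is the whole file content.
    static String slurp(File file) throws FileNotFoundException {
        try (Scanner scanner = new Scanner(file, "UTF-8")) {
            return scanner.useDelimiter("\\A").hasNext() ? scanner.next() : "";
        }
    }
}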
Use of java.util.Scanner in project flink by apache.
In class WebRuntimeMonitorITCase, method testStandaloneWebRuntimeMonitor.
/**
 * Tests operation of the monitor in standalone mode.
 */
@Test
public void testStandaloneWebRuntimeMonitor() throws Exception {
    final Deadline deadline = TestTimeout.fromNow();
    TestingCluster flink = null;
    WebRuntimeMonitor webMonitor = null;
    try {
        // Flink w/o a web monitor
        flink = new TestingCluster(new Configuration());
        flink.start(true);
        webMonitor = startWebRuntimeMonitor(flink);
        try (HttpTestClient client = new HttpTestClient("localhost", webMonitor.getServerPort())) {
            String expected = new Scanner(new File(MAIN_RESOURCES_PATH + "/index.html")).useDelimiter("\\A").next();
            // Request the file from the web server
            client.sendGetRequest("index.html", deadline.timeLeft());
            HttpTestClient.SimpleHttpResponse response = client.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("html"));
            assertEquals(expected, response.getContent());
            // Simple overview request
            client.sendGetRequest("/overview", deadline.timeLeft());
            response = client.getNextResponse(deadline.timeLeft());
            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(response.getType(), MimeTypes.getMimeTypeForExtension("json"));
            assertTrue(response.getContent().contains("\"taskmanagers\":1"));
        }
    } finally {
        if (flink != null) {
            flink.shutdown();
        }
        if (webMonitor != null) {
            webMonitor.stop();
        }
    }
}
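Since testStandaloneWebRuntimeMonitor uses the same slurp idiom, it is worth noting that on Java 11 and later the Scanner trick can be replaced outright; this is an alternative, not what the Flink tests actually do:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public final class ReadStringExample {
    // Java 11+ alternative to the Scanner "\\A" slurp idiom used in these tests.
    static String slurp(String path) throws IOException {
        return Files.readString(Path.of(path), StandardCharsets.UTF_8);
    }
}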
Use of java.util.Scanner in project hadoop by apache.
In class BlockPoolSlice, method addReplicaToReplicasMap.
private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap,
        final RamDiskReplicaTracker lazyWriteReplicaMap, boolean isFinalized) throws IOException {
    ReplicaInfo newReplica = null;
    long blockId = block.getBlockId();
    long genStamp = block.getGenerationStamp();
    if (isFinalized) {
        newReplica = new ReplicaBuilder(ReplicaState.FINALIZED)
            .setBlockId(blockId)
            .setLength(block.getNumBytes())
            .setGenerationStamp(genStamp)
            .setFsVolume(volume)
            .setDirectoryToUse(DatanodeUtil.idToBlockDir(finalizedDir, blockId))
            .build();
    } else {
        File file = new File(rbwDir, block.getBlockName());
        boolean loadRwr = true;
        File restartMeta = new File(file.getParent() + File.pathSeparator + "." + file.getName() + ".restart");
        Scanner sc = null;
        try {
            sc = new Scanner(restartMeta, "UTF-8");
            // The restart meta file exists
            if (sc.hasNextLong() && (sc.nextLong() > timer.now())) {
                // It didn't expire. Load the replica as an RBW.
                // We don't know the expected block length, so just use 0
                // and don't reserve any more space for writes.
                newReplica = new ReplicaBuilder(ReplicaState.RBW)
                    .setBlockId(blockId)
                    .setLength(validateIntegrityAndSetLength(file, genStamp))
                    .setGenerationStamp(genStamp)
                    .setFsVolume(volume)
                    .setDirectoryToUse(file.getParentFile())
                    .setWriterThread(null)
                    .setBytesToReserve(0)
                    .build();
                loadRwr = false;
            }
            sc.close();
            if (!fileIoProvider.delete(volume, restartMeta)) {
                FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath());
            }
        } catch (FileNotFoundException fnfe) {
            // The restart meta file doesn't exist; nothing to do here
        } finally {
            if (sc != null) {
                sc.close();
            }
        }
        // Restart meta doesn't exist or expired.
        if (loadRwr) {
            ReplicaBuilder builder = new ReplicaBuilder(ReplicaState.RWR)
                .setBlockId(blockId)
                .setLength(validateIntegrityAndSetLength(file, genStamp))
                .setGenerationStamp(genStamp)
                .setFsVolume(volume)
                .setDirectoryToUse(file.getParentFile());
            newReplica = builder.build();
        }
    }
    ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
    if (oldReplica == null) {
        volumeMap.add(bpid, newReplica);
    } else {
        // We have multiple replicas of the same block, so decide which one
        // to keep.
        newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
    }
    // Track replicas on transient storage so they get persisted to disk
    // eventually.
    if (newReplica.getVolume().isTransientStorage()) {
        lazyWriteReplicaMap.addReplica(bpid, blockId, (FsVolumeImpl) newReplica.getVolume(), 0);
    } else {
        lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
    }
    if (oldReplica == null) {
        incrNumBlocks();
    }
}
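The Scanner usage here is the token-level API rather than line reading: hasNextLong() probes whether the next token parses as a long before nextLong() consumes it, which is how the code distinguishes a valid expiry timestamp from an empty or corrupt restart meta file. A minimal sketch of the same probe-then-read pattern (the class, method, and "missing file means expired" policy are illustrative assumptions, not Hadoop code):

import java.io.File;
import java.io.FileNotFoundException;
import java.util.Scanner;

public final class ExpiryFileExample {
    // Reads a single long (an expiry timestamp) from a small metadata file.
    // hasNextLong() guards against empty or corrupt files; a missing file is
    // treated the same as an expired one, as in addReplicaToReplicasMap above.
    static boolean notExpired(File meta, long now) {
        try (Scanner sc = new Scanner(meta, "UTF-8")) {
            return sc.hasNextLong() && sc.nextLong() > now;
        } catch (FileNotFoundException fnfe) {
            return false;
        }
    }
}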
Use of java.util.Scanner in project hadoop by apache.
In class TestDFSShell, method runCount.
private static void runCount(String path, long dirs, long files, FsShell shell) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bytes);
    PrintStream oldOut = System.out;
    System.setOut(out);
    Scanner in = null;
    String results = null;
    try {
        runCmd(shell, "-count", path);
        results = bytes.toString();
        in = new Scanner(results);
        assertEquals(dirs, in.nextLong());
        assertEquals(files, in.nextLong());
    } finally {
        System.setOut(oldOut);
        if (in != null) {
            in.close();
        }
        IOUtils.closeStream(out);
        System.out.println("results:\n" + results);
    }
}
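Here Scanner is constructed over a String, which makes it a convenient whitespace-tolerant parser for command output: the first two tokens of the captured "-count" output are the directory and file counts, and nextLong() skips the surrounding whitespace automatically. The same trick works for any columnar text; a small self-contained sketch (the input string is made up to resemble "-count" output, not captured from a real run):

import java.util.Scanner;

public final class CountOutputExample {
    public static void main(String[] args) {
        // Example line in the shape produced by "hdfs dfs -count":
        // <dirs> <files> <bytes> <path>
        String results = "           3           12              4096 /user/test";
        try (Scanner in = new Scanner(results)) {
            long dirs = in.nextLong();
            long files = in.nextLong();
            System.out.println("dirs=" + dirs + ", files=" + files);
        }
    }
}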