Use of org.apache.commons.io.input.CountingInputStream in the AntennaPod project (by AntennaPod):
class Id3ReaderTest, method testReadFrameHeader.
@Test
public void testReadFrameHeader() throws IOException {
    // Serialize a CHAP frame header, then parse it back and verify both fields.
    final String frameId = "CHAP";
    final int frameSize = 42;
    byte[] raw = generateFrameHeader(frameId, frameSize);
    CountingInputStream countingStream = new CountingInputStream(new ByteArrayInputStream(raw));
    FrameHeader parsed = new ID3Reader(countingStream).readFrameHeader();
    assertEquals(frameId, parsed.getId());
    assertEquals(frameSize, parsed.getSize());
}
Use of org.apache.commons.io.input.CountingInputStream in the AntennaPod project (by AntennaPod):
class Id3ReaderTest, method testReadTagHeader.
@Test
public void testReadTagHeader() throws IOException, ID3ReaderException {
    // Parse a generated ID3 tag header and check id, version, and size round-trip.
    final int tagSize = 23;
    byte[] raw = generateId3Header(tagSize);
    CountingInputStream countingStream = new CountingInputStream(new ByteArrayInputStream(raw));
    TagHeader parsed = new ID3Reader(countingStream).readTagHeader();
    assertEquals("ID3", parsed.getId());
    assertEquals(42, parsed.getVersion());
    assertEquals(tagSize, parsed.getSize());
}
Use of org.apache.commons.io.input.CountingInputStream in the AntennaPod project (by AntennaPod):
class ChapterReaderTest, method testReadFullTagWithChapter.
@Test
public void testReadFullTagWithChapter() throws IOException, ID3ReaderException {
    // Assemble a full tag: ID3 header wrapping a single CHAP frame (no sub-frames).
    byte[] chapterFrame = Id3ReaderTest.concat(
            Id3ReaderTest.generateFrameHeader(ChapterReader.FRAME_ID_CHAPTER, CHAPTER_WITHOUT_SUBFRAME.length),
            CHAPTER_WITHOUT_SUBFRAME);
    byte[] fullTag = Id3ReaderTest.concat(Id3ReaderTest.generateId3Header(chapterFrame.length), chapterFrame);

    CountingInputStream countingStream = new CountingInputStream(new ByteArrayInputStream(fullTag));
    ChapterReader chapterReader = new ChapterReader(countingStream);
    chapterReader.readInputStream();

    // Exactly one chapter should be parsed, starting at the expected timestamp.
    assertEquals(1, chapterReader.getChapters().size());
    assertEquals(CHAPTER_WITHOUT_SUBFRAME_START_TIME, chapterReader.getChapters().get(0).getStart());
}
Use of org.apache.commons.io.input.CountingInputStream in the Indy project (by Commonjava):
class DelayedDownload, method run.
@Override
public void run() {
    // Optionally sleeps, then downloads {key, path} via the Indy client into
    // an in-memory buffer, recording start/end timestamps and signaling the
    // shared latch when done (or when aborted).
    Logger logger = LoggerFactory.getLogger(getClass());
    logger.info("Starting: {}", Thread.currentThread().getName());
    if (initialDelay > 0) {
        logger.info("Delaying: {}", initialDelay);
        try {
            Thread.sleep(initialDelay);
        } catch (final InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption,
            // and release latch waiters — otherwise they could block forever.
            Thread.currentThread().interrupt();
            latch.countDown();
            return;
        }
    }
    startTime = System.nanoTime();
    content = new ByteArrayOutputStream();
    logger.info("Trying: {}", Thread.currentThread().getName());
    try (InputStream in = client.content().get(key, path)) {
        if (in == null) {
            // A null stream means the remote content does not exist.
            missing = true;
        } else {
            CountingInputStream cin = new CountingInputStream(in);
            IOUtils.copy(cin, content);
            logger.debug("Read: {} bytes", cin.getByteCount());
        }
    } catch (IndyClientException | IOException e) {
        // Log with full stack trace instead of printing to stderr; the download
        // is best-effort, so the error is recorded but not rethrown.
        logger.error("Download failed for path: " + path, e);
    } finally {
        // Always record the end time and release waiters, even if the copy
        // throws an unexpected runtime exception.
        endTime = System.nanoTime();
        latch.countDown();
    }
    logger.info("Stopping: {}", Thread.currentThread().getName());
}
Use of org.apache.commons.io.input.CountingInputStream in the Jackrabbit project (by Apache):
class DbDataStore, method addRecord.
/**
 * Stores the given stream in the database and returns a record addressed by
 * the content digest. The data is first written under a random temporary id,
 * then the row is renamed to the digest-based id in an optimistic retry loop,
 * so that concurrent writers of identical content converge on one row.
 *
 * @param stream the content to store; fully consumed by this method
 * @return a record describing the stored content (identifier, length, mtime)
 * @throws DataStoreException if any database or I/O operation fails
 */
public DataRecord addRecord(InputStream stream) throws DataStoreException {
InputStream fileInput = null;
String tempId = null;
ResultSet rs = null;
try {
long tempModified;
// Phase 1: reserve a unique temporary row to stream the data into.
while (true) {
try {
tempModified = System.currentTimeMillis();
String id = UUID.randomUUID().toString();
tempId = TEMP_PREFIX + id;
// Mark the temp id as in-use so garbage collection won't remove it
// while the upload is in progress.
temporaryInUse.add(tempId);
// SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=?
rs = conHelper.query(selectMetaSQL, tempId);
boolean hasNext = rs.next();
DbUtility.close(rs);
rs = null;
if (hasNext) {
// re-try in the very, very unlikely event that the row already exists
continue;
}
// INSERT INTO DATASTORE VALUES(?, 0, ?, NULL)
conHelper.exec(insertTempSQL, tempId, tempModified);
break;
} catch (Exception e) {
throw convert("Can not insert new record", e);
} finally {
DbUtility.close(rs);
// prevent that rs.close() is called again
rs = null;
}
}
// Phase 2: stream the data into the temp row, computing the digest and
// byte count on the fly via wrapping streams.
MessageDigest digest = getDigest();
DigestInputStream dIn = new DigestInputStream(stream, digest);
CountingInputStream in = new CountingInputStream(dIn);
StreamWrapper wrapper;
// The "store stream" strategy decides how the (unknown) length is
// communicated to the JDBC driver; some drivers need a real length,
// which forces a spill to a temp file first.
if (STORE_SIZE_MINUS_ONE.equals(storeStream)) {
wrapper = new StreamWrapper(in, -1);
} else if (STORE_SIZE_MAX.equals(storeStream)) {
wrapper = new StreamWrapper(in, Integer.MAX_VALUE);
} else if (STORE_TEMP_FILE.equals(storeStream)) {
File temp = moveToTempFile(in);
long length = temp.length();
wrapper = new StreamWrapper(new ResettableTempFileInputStream(temp), length);
} else {
throw new DataStoreException("Unsupported stream store algorithm: " + storeStream);
}
// UPDATE DATASTORE SET DATA=? WHERE ID=?
conHelper.exec(updateDataSQL, wrapper, tempId);
long length = in.getByteCount();
DataIdentifier identifier = new DataIdentifier(encodeHexString(digest.digest()));
usesIdentifier(identifier);
String id = identifier.toString();
long newModified;
// Phase 3: rename the temp row to the digest id, retrying until either
// the rename succeeds or an existing identical row is found and touched.
while (true) {
newModified = System.currentTimeMillis();
if (checkExisting(tempId, length, identifier)) {
// Identical content already stored: refresh its timestamp and
// drop our temporary copy.
touch(identifier, newModified);
conHelper.exec(deleteSQL, tempId);
break;
}
try {
// UPDATE DATASTORE SET ID=?, LENGTH=?, LAST_MODIFIED=?
// WHERE ID=? AND LAST_MODIFIED=?
int count = conHelper.update(updateSQL, id, length, newModified, tempId, tempModified);
// collection could delete rows)
if (count != 0) {
// update was successful
break;
}
} catch (SQLException e) {
// duplicate key (the row already exists) - repeat
// we use exception handling for flow control here, which is bad,
// but the alternative is to use UPDATE ... WHERE ... (SELECT ...)
// which could cause a deadlock in some databases - also,
// duplicate key will only occur if somebody else concurrently
// added the same record (which is very unlikely)
}
// The rename matched zero rows: someone changed LAST_MODIFIED under
// us. Re-read it and retry with the fresh value.
// SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=?
rs = conHelper.query(selectMetaSQL, tempId);
if (!rs.next()) {
// the row was deleted, which is unexpected / not allowed
String msg = DIGEST + " temporary entry deleted: " + " id=" + tempId + " length=" + length;
log.error(msg);
throw new DataStoreException(msg);
}
tempModified = rs.getLong(2);
DbUtility.close(rs);
rs = null;
}
usesIdentifier(identifier);
DbDataRecord record = new DbDataRecord(this, identifier, length, newModified);
return record;
} catch (Exception e) {
throw convert("Can not insert new record", e);
} finally {
// Always release the temp-id reservation and close any open resources,
// regardless of success or failure.
if (tempId != null) {
temporaryInUse.remove(tempId);
}
DbUtility.close(rs);
// NOTE(review): fileInput is never assigned in this method, so this close
// appears to be dead code — confirm against the full class before removing.
if (fileInput != null) {
try {
fileInput.close();
} catch (IOException e) {
throw convert("Can not close temporary file", e);
}
}
}
}
Aggregations