Usage of java.sql.Blob in the Apache Derby project.
From class BlobAccessTest, method testFetchLargeBlobs.
/**
 * Fetches a number of Blobs using a rather large read buffer with
 * {@code getBinaryStream}, draining each stream completely.
 *
 * @throws IOException if reading a Blob stream fails
 * @throws SQLException if a database operation fails
 */
public void testFetchLargeBlobs() throws IOException, SQLException {
    PreparedStatement ps = prepareStatement("select dBlob, length from largeBlobs");
    ResultSet rs = ps.executeQuery();
    // 16 KB read buffer.
    byte[] byteBuf = new byte[16 * 1024];
    while (rs.next()) {
        // Renamed from 'Blob' so the local no longer shadows the type name.
        Blob blob = rs.getBlob(1);
        InputStream content = blob.getBinaryStream();
        long remaining = rs.getInt(2);
        while (remaining > 0) {
            int read = content.read(byteBuf);
            // Guard against end-of-stream: read() returns -1 there, and the
            // original 'remaining -= read' would then loop forever.
            if (read == -1) {
                fail("Reached end-of-stream with " + remaining + " bytes remaining");
            }
            remaining -= read;
        }
        content.close();
    }
    rs.close();
    ps.close();
}
Usage of java.sql.Blob in the Apache Derby project.
From class BlobAccessTest, method testFetchSmallBlobs.
/**
 * Fetches a number of small Blobs, getting the content using getBytes.
 * <p>
 * The exact length of the Blob is used when getting the bytes, and the
 * returned array is checked against the stored length.
 *
 * @throws SQLException if a database operation fails
 */
public void testFetchSmallBlobs() throws SQLException {
    PreparedStatement ps = prepareStatement("select dBlob, length from smallBlobs");
    ResultSet rs = ps.executeQuery();
    while (rs.next()) {
        // Renamed from 'Blob' so the local no longer shadows the type name.
        Blob blob = rs.getBlob(1);
        int blobLength = rs.getInt(2);
        byte[] content = blob.getBytes(1, blobLength);
        // The original discarded 'content' unchecked; verify we actually
        // received the full value.
        assertEquals(blobLength, content.length);
    }
    rs.close();
    ps.close();
}
Usage of java.sql.Blob in the Apache Derby project.
From class BlobAccessTest, method fetchBlobPieceByPiece.
/**
 * Fetches a "large" Blob piece by piece using getBytes or getBinaryStream.
 *
 * @param modifyBlob whether to modify the Blob before fetching it
 *      (determines the internal Derby Blob representation)
 * @param fetchMode how to read the content; one of {@code FETCH_GETBYTES}
 *      or {@code FETCH_GETBINARYSTREAM}
 * @throws IOException if reading the Blob stream fails
 * @throws SQLException if a database operation fails
 */
private void fetchBlobPieceByPiece(boolean modifyBlob, int fetchMode) throws IOException, SQLException {
    // Select just one Blob.
    PreparedStatement ps = prepareStatement("select dBlob, length from largeBlobs where id = 4");
    ResultSet rs = ps.executeQuery();
    while (rs.next()) {
        Blob blob = rs.getBlob(1);
        long remaining = rs.getInt(2);
        if (modifyBlob) {
            // Modify the Blob to create a temporary copy in memory or on
            // disk (depends on the Blob size).
            long modifyStart = System.currentTimeMillis();
            blob.setBytes(++remaining, new byte[] { (byte) 'X' });
            println("Blob modification duration: " + (System.currentTimeMillis() - modifyStart) + " ms");
        }
        long pos = 1;
        final int MAX_SIZE = 32676;
        switch (fetchMode) {
            case FETCH_GETBYTES: {
                while (remaining > 0) {
                    byte[] bytes = blob.getBytes(pos, (int) Math.min(MAX_SIZE, remaining));
                    // A zero-length return would loop forever; fail fast instead.
                    assertTrue("getBytes returned no data", bytes.length > 0);
                    pos += bytes.length;
                    remaining -= bytes.length;
                }
                break;
            }
            case FETCH_GETBINARYSTREAM: {
                InputStream stream = blob.getBinaryStream();
                byte[] buf = new byte[MAX_SIZE];
                while (remaining > 0) {
                    int read = stream.read(buf);
                    // Guard against end-of-stream: read() returns -1 there, and
                    // the original 'remaining -= read' would then loop forever.
                    if (read == -1) {
                        fail("Premature end-of-stream with " + remaining + " bytes remaining");
                    }
                    pos += read;
                    remaining -= read;
                }
                stream.close();
                break;
            }
            default:
                fail("Unknown fetch mode: " + fetchMode);
        }
    }
    rs.close();
    ps.close();
}
Usage of java.sql.Blob in the Apache Derby project.
From class LOBLocatorReleaseTest, method forwardOnlyTest.
/**
 * Exercises LOB locator lifecycle on a forward-only result set over the
 * given table: plain iteration, fetching some Blob/Clob references with a
 * mid-loop commit, closing the statement mid-result-set, and rolling back
 * before closing the result set. The test passes if no step throws.
 *
 * @param table name of the table with a dBlob and a dClob column
 * @throws SQLException if a database operation fails unexpectedly
 */
private void forwardOnlyTest(String table) throws SQLException {
final String sql = "select dBlob, dClob from " + table;
// Locators are only released on free/commit/rollback, so run without
// auto-commit to control when that happens.
getConnection().setAutoCommit(false);
// Just loop through without touching any LOB.
Statement stmt = createStatement();
ResultSet rs = stmt.executeQuery(sql);
while (rs.next()) {
// Just iterate through.
}
rs.close();
// Loop through and get references to some of the LOBs.
// When you get a LOB reference, the locator should only be freed on
// explicit calls to free (requires Java SE 6) or commit/rollback.
rs = stmt.executeQuery(sql);
int index = 0;
while (rs.next()) {
// Touch the Blob on every other row...
if (index % 2 == 0) {
Blob b = rs.getBlob(1);
if (!rs.wasNull()) {
b.length();
}
}
// ...and the Clob on every third row.
if (index % 3 == 0) {
Clob c = rs.getClob(2);
if (!rs.wasNull()) {
c.length();
}
}
// Clear all LOB mappings after 10 rows.
if (index == 9) {
commit();
}
index++;
}
rs.close();
stmt.close();
// Close the statement after a few rows.
stmt = createStatement();
rs = stmt.executeQuery(sql);
rs.next();
rs.next();
stmt.close();
// The LOB mapping is cleared on a commit.
commit();
// Close the result set after a few rows and a rollback.
stmt = createStatement();
rs = stmt.executeQuery(sql);
rs.next();
rs.next();
rollback();
rs.close();
}
Usage of java.sql.Blob in the Apache Derby project.
From class BlobSetMethodsTest, method testSetBytesLargeBlob.
/**
 * Tests a large blob (more than 4k) to ensure LOBStreamControl spills to a
 * file: builds the blob with setBytes, patches it in two places, inserts it,
 * reads the patched regions back, and finally truncates it.
 *
 * @throws SQLException if a database operation fails
 */
public void testSetBytesLargeBlob() throws SQLException {
    Connection con = getConnection();
    con.setAutoCommit(false);
    PreparedStatement pstmt = con.prepareStatement("insert into " + "blobtest (id, data) values (?,?)");
    Blob blob = con.createBlob();

    // A buffer of repeating byte values 0..254.
    byte[] payload = new byte[BUFFER_SIZE];
    for (int pos = 0; pos < payload.length; pos++) {
        payload[pos] = (byte) (pos % 255);
    }
    // Append five buffers so the total exceeds 4k and a backing file is used.
    for (int chunk = 0; chunk < 5; chunk++) {
        blob.setBytes(chunk * BUFFER_SIZE + 1, payload);
    }
    assertEquals(BUFFER_SIZE * 5, blob.length());

    // Patch bytes in the middle and past the current end of the blob.
    byte[] patch = new byte[UPDATE_SIZE];
    for (int pos = 0; pos < patch.length; pos++) {
        // Just any value.
        patch[pos] = 120;
    }
    blob.setBytes(BUFFER_SIZE + 1, patch);
    blob.setBytes(BUFFER_SIZE * 5 + 1, patch);
    assertEquals(5 * BUFFER_SIZE + UPDATE_SIZE, blob.length());

    // Insert the blob into the table and read it back.
    pstmt.setInt(1, 3);
    pstmt.setBlob(2, blob);
    pstmt.executeUpdate();
    Statement stmt = con.createStatement();
    ResultSet rs = stmt.executeQuery("select data from blobtest where " + "id = 3");
    assertEquals(true, rs.next());
    blob = rs.getBlob(1);

    // Both patched regions must contain the patch bytes.
    byte[] fetched = blob.getBytes(BUFFER_SIZE + 1, UPDATE_SIZE);
    assertEquals(5 * BUFFER_SIZE + UPDATE_SIZE, blob.length());
    for (int pos = 0; pos < UPDATE_SIZE; pos++) {
        assertEquals(patch[pos], fetched[pos]);
    }
    fetched = blob.getBytes(5 * BUFFER_SIZE + 1, UPDATE_SIZE);
    for (int pos = 0; pos < UPDATE_SIZE; pos++) {
        assertEquals(patch[pos], fetched[pos]);
    }

    // Test truncate.
    blob.truncate(BUFFER_SIZE);
    assertEquals("truncate failed", BUFFER_SIZE, blob.length());

    rs.close();
    con.commit();
    stmt.close();
    pstmt.close();
}
Aggregations