
Example 1 with FileReader

Use of org.apache.cassandra.io.util.FileReader in the Apache Cassandra project.

From the class CommitLogSegmentManagerCDCTest, method testCompletedFlag.

@Test
public void testCompletedFlag() throws Throwable {
    String tableName = createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
    CommitLogSegment initialSegment = CommitLog.instance.segmentManager.allocatingFrom();
    testWithCDCSpaceInMb(8, () -> bulkWrite(tableName));
    CommitLog.instance.forceRecycleAllSegments();
    // Confirm index file is written
    File cdcIndexFile = initialSegment.getCDCIndexFile();
    Assert.assertTrue("Index file not written: " + cdcIndexFile, cdcIndexFile.exists());
    // Read index file and confirm second line is COMPLETED
    BufferedReader in = new BufferedReader(new FileReader(cdcIndexFile));
    // The first readLine() returns the sync offset line; the second returns the completion marker
    String input = in.readLine();
    input = in.readLine();
    Assert.assertTrue("Expected COMPLETED in index file, got: " + input, input.equals("COMPLETED"));
    in.close();
}
Also used: BufferedReader (java.io.BufferedReader), FileReader (org.apache.cassandra.io.util.FileReader), File (org.apache.cassandra.io.util.File), Test (org.junit.Test)

Example 2 with FileReader

Use of org.apache.cassandra.io.util.FileReader in the Apache Cassandra project.

From the class SchemaCQLHelperTest, method testSnapshot.

@Test
public void testSnapshot() throws Throwable {
    String typeA = createType("CREATE TYPE %s (a1 varint, a2 varint, a3 varint);");
    String typeB = createType("CREATE TYPE %s (b1 frozen<" + typeA + ">, b2 frozen<" + typeA + ">, b3 frozen<" + typeA + ">);");
    String typeC = createType("CREATE TYPE %s (c1 frozen<" + typeB + ">, c2 frozen<" + typeB + ">, c3 frozen<" + typeB + ">);");
    String tableName = createTable("CREATE TABLE IF NOT EXISTS %s (" +
                                   "pk1 varint," +
                                   "pk2 ascii," +
                                   "ck1 varint," +
                                   "ck2 varint," +
                                   "reg1 " + typeC + "," +
                                   "reg2 int," +
                                   "reg3 int," +
                                   "PRIMARY KEY ((pk1, pk2), ck1, ck2)) WITH " +
                                   "CLUSTERING ORDER BY (ck1 ASC, ck2 DESC);");
    alterTable("ALTER TABLE %s DROP reg3 USING TIMESTAMP 10000;");
    alterTable("ALTER TABLE %s ADD reg3 int;");
    // CREATE INDEX def_name_idx ON abc.def (name);
    createIndex("CREATE INDEX ON %s(reg2)");
    for (int i = 0; i < 10; i++)
        execute("INSERT INTO %s (pk1, pk2, ck1, ck2, reg1, reg2) VALUES (?, ?, ?, ?, ?, ?)", i, i + 1, i + 2, i + 3, null, i + 5);
    ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName);
    cfs.snapshot(SNAPSHOT);
    String schema = Files.toString(cfs.getDirectories().getSnapshotSchemaFile(SNAPSHOT).toJavaIOFile(), Charset.defaultCharset());
    assertThat(schema, allOf(
        containsString(String.format("CREATE TYPE IF NOT EXISTS %s.%s (\n" +
                                     "    a1 varint,\n" +
                                     "    a2 varint,\n" +
                                     "    a3 varint\n" +
                                     ");", keyspace(), typeA)),
        containsString(String.format("CREATE TYPE IF NOT EXISTS %s.%s (\n" +
                                     "    a1 varint,\n" +
                                     "    a2 varint,\n" +
                                     "    a3 varint\n" +
                                     ");", keyspace(), typeA)),
        containsString(String.format("CREATE TYPE IF NOT EXISTS %s.%s (\n" +
                                     "    b1 frozen<%s>,\n" +
                                     "    b2 frozen<%s>,\n" +
                                     "    b3 frozen<%s>\n" +
                                     ");", keyspace(), typeB, typeA, typeA, typeA)),
        containsString(String.format("CREATE TYPE IF NOT EXISTS %s.%s (\n" +
                                     "    c1 frozen<%s>,\n" +
                                     "    c2 frozen<%s>,\n" +
                                     "    c3 frozen<%s>\n" +
                                     ");", keyspace(), typeC, typeB, typeB, typeB))));
    // trim to ensure order
    schema = schema.substring(schema.indexOf("CREATE TABLE"));
    String expected = "CREATE TABLE IF NOT EXISTS " + keyspace() + "." + tableName + " (\n" +
                      "    pk1 varint,\n" +
                      "    pk2 ascii,\n" +
                      "    ck1 varint,\n" +
                      "    ck2 varint,\n" +
                      "    reg2 int,\n" +
                      "    reg1 " + typeC + ",\n" +
                      "    reg3 int,\n" +
                      "    PRIMARY KEY ((pk1, pk2), ck1, ck2)\n" +
                      ") WITH ID = " + cfs.metadata.id + "\n" +
                      "    AND CLUSTERING ORDER BY (ck1 ASC, ck2 DESC)";
    assertThat(schema, allOf(startsWith(expected),
                             containsString("ALTER TABLE " + keyspace() + "." + tableName + " DROP reg3 USING TIMESTAMP 10000;"),
                             containsString("ALTER TABLE " + keyspace() + "." + tableName + " ADD reg3 int;")));
    assertThat(schema, containsString("CREATE INDEX IF NOT EXISTS " + tableName + "_reg2_idx ON " + keyspace() + '.' + tableName + " (reg2);"));
    JSONObject manifest = (JSONObject) new JSONParser().parse(new FileReader(cfs.getDirectories().getSnapshotManifestFile(SNAPSHOT)));
    JSONArray files = (JSONArray) manifest.get("files");
    // two files, the second is index
    Assert.assertEquals(2, files.size());
}
Also used: JSONObject (org.json.simple.JSONObject), JSONArray (org.json.simple.JSONArray), JSONParser (org.json.simple.parser.JSONParser), FileReader (org.apache.cassandra.io.util.FileReader), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Test (org.junit.Test)
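
The manifest parsing at the end of this test hands a FileReader straight to JSONParser and never closes it. Purely as an illustration, here is a minimal sketch of that step factored into a helper with the reader closed via try-with-resources; the class and method names (SnapshotManifestSketch, readManifestFiles) are hypothetical, and only the json-simple calls already shown above are assumed.

import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileReader;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

final class SnapshotManifestSketch {
    // Parses a snapshot manifest and returns its "files" array, closing the reader when done
    static JSONArray readManifestFiles(File manifestFile) throws Exception {
        try (FileReader reader = new FileReader(manifestFile)) {
            JSONObject manifest = (JSONObject) new JSONParser().parse(reader);
            return (JSONArray) manifest.get("files");
        }
    }
}

With a helper like this, the manifest handling in the test reduces to a single call followed by the size assertion.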

Example 3 with FileReader

Use of org.apache.cassandra.io.util.FileReader in the Apache Cassandra project.

From the class CloudstackSnitch, method csEndpointFromLease.

String csEndpointFromLease(File lease) throws ConfigurationException {
    String line;
    String endpoint = null;
    // Lease lines of interest have the form: "option dhcp-server-identifier <address>;"
    Pattern identifierPattern = Pattern.compile("^[ \t]*option dhcp-server-identifier (.*);$");
    try (BufferedReader reader = new BufferedReader(new FileReader(lease))) {
        while ((line = reader.readLine()) != null) {
            Matcher matcher = identifierPattern.matcher(line);
            if (matcher.find()) {
                // The captured group is the DHCP server address, which doubles as the CloudStack metadata endpoint
                endpoint = matcher.group(1);
                break;
            }
        }
    } catch (Exception e) {
        throw new ConfigurationException("CloudstackSnitch cannot access lease file.");
    }
    if (endpoint == null) {
        throw new ConfigurationException("No metadata server could be found in lease file.");
    }
    return "http://" + endpoint;
}
Also used: Pattern (java.util.regex.Pattern), Matcher (java.util.regex.Matcher), ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException), BufferedReader (java.io.BufferedReader), FileReader (org.apache.cassandra.io.util.FileReader), IOException (java.io.IOException)
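
To make the regular expression concrete, here is a small standalone illustration of the line format it extracts. The lease line and address below are invented for the example and are not taken from a real lease file.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LeasePatternDemo {
    public static void main(String[] args) {
        // Invented lease line for illustration; real lease files are written by the DHCP client
        String leaseLine = "  option dhcp-server-identifier 10.0.0.1;";
        Pattern identifierPattern = Pattern.compile("^[ \t]*option dhcp-server-identifier (.*);$");
        Matcher matcher = identifierPattern.matcher(leaseLine);
        if (matcher.find()) {
            // Prints "http://10.0.0.1", mirroring the value csEndpointFromLease would return for this line
            System.out.println("http://" + matcher.group(1));
        }
    }
}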

Example 4 with FileReader

Use of org.apache.cassandra.io.util.FileReader in the Apache Cassandra project.

From the class CommitLogSegmentManagerCDCTest, method testCDCIndexFileWriteOnSync.

@Test
public void testCDCIndexFileWriteOnSync() throws IOException {
    createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
    new RowUpdateBuilder(currentTableMetadata(), 0, 1)
        .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
        .build()
        .apply();
    CommitLog.instance.sync(true);
    CommitLogSegment currentSegment = CommitLog.instance.segmentManager.allocatingFrom();
    int syncOffset = currentSegment.lastSyncedOffset;
    // Confirm index file is written
    File cdcIndexFile = currentSegment.getCDCIndexFile();
    Assert.assertTrue("Index file not written: " + cdcIndexFile, cdcIndexFile.exists());
    // Read index value and confirm it's == end from last sync
    BufferedReader in = new BufferedReader(new FileReader(cdcIndexFile));
    String input = in.readLine();
    Integer offset = Integer.parseInt(input);
    Assert.assertEquals(syncOffset, (long) offset);
    in.close();
}
Also used: RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), BufferedReader (java.io.BufferedReader), FileReader (org.apache.cassandra.io.util.FileReader), File (org.apache.cassandra.io.util.File), Test (org.junit.Test)
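
Examples 1 and 4 read the same CDC index file by hand: its first line holds the offset of the last commit log sync, and in Example 1 a second line holding COMPLETED appears after the segments are recycled. Purely as a sketch, here are the two reads combined into a small helper class, using try-with-resources so the reader is always closed; the class and method names are hypothetical, and the file layout is inferred from the two tests above.

import java.io.BufferedReader;
import java.io.IOException;
import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileReader;

final class CdcIndexReaderSketch {
    // First line of the index file: offset of the last commit log sync
    static int lastSyncedOffset(File cdcIndexFile) throws IOException {
        try (BufferedReader in = new BufferedReader(new FileReader(cdcIndexFile))) {
            return Integer.parseInt(in.readLine());
        }
    }

    // Second line of the index file: "COMPLETED" once the segment is done
    static boolean isCompleted(File cdcIndexFile) throws IOException {
        try (BufferedReader in = new BufferedReader(new FileReader(cdcIndexFile))) {
            in.readLine(); // skip the offset line
            return "COMPLETED".equals(in.readLine());
        }
    }
}

Example 1 would then assert on isCompleted and Example 4 on lastSyncedOffset, with no explicit close calls in the tests.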

Aggregations

FileReader (org.apache.cassandra.io.util.FileReader): 4
BufferedReader (java.io.BufferedReader): 3
Test (org.junit.Test): 3
File (org.apache.cassandra.io.util.File): 2
IOException (java.io.IOException): 1
Matcher (java.util.regex.Matcher): 1
Pattern (java.util.regex.Pattern): 1
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 1
ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException): 1
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 1
JSONArray (org.json.simple.JSONArray): 1
JSONObject (org.json.simple.JSONObject): 1
JSONParser (org.json.simple.parser.JSONParser): 1