Search in sources :

Example 1 with ZNRecordStreamingSerializer

Use of org.apache.helix.manager.zk.ZNRecordStreamingSerializer in the project helix by apache.

In the class TestJacksonPayloadSerializer, the method testFullZNRecordStreamingSerializeDeserialize.

/**
 * Verifies that a payload survives a full round trip: the payload is attached
 * to a ZNRecord, the record is serialized and deserialized with
 * ZNRecordStreamingSerializer, and the payload is then read back intact.
 */
@Test
public void testFullZNRecordStreamingSerializeDeserialize() {
    SampleDeserialized original = getSample();
    // Enclose the payload in a ZNRecord configured with the Jackson payload serializer.
    ZNRecord enclosing = new ZNRecord("testFullZNRecordStreamingSerializeDeserialize");
    enclosing.setPayloadSerializer(new JacksonPayloadSerializer());
    enclosing.setPayload(original);
    // Round-trip the enclosing record through the streaming serializer.
    ZNRecordStreamingSerializer streamingSerializer = new ZNRecordStreamingSerializer();
    byte[] bytes = streamingSerializer.serialize(enclosing);
    ZNRecord roundTripped = (ZNRecord) streamingSerializer.deserialize(bytes);
    // Reattach the payload serializer before reading the payload back.
    roundTripped.setPayloadSerializer(new JacksonPayloadSerializer());
    SampleDeserialized extracted = roundTripped.getPayload(SampleDeserialized.class);
    Assert.assertEquals(extracted, original);
}
Also used : ZNRecordStreamingSerializer(org.apache.helix.manager.zk.ZNRecordStreamingSerializer) ZNRecord(org.apache.helix.ZNRecord) Test(org.testng.annotations.Test)

Example 2 with ZNRecordStreamingSerializer

Use of org.apache.helix.manager.zk.ZNRecordStreamingSerializer in the project helix by apache.

In the class TestJacksonPayloadSerializer, the method testRawPayloadMissingIfUnspecified.

/**
 * Test that the rawPayload field is not included in the serialized form when no
 * payload has been set on the record. This is mainly to maintain backward
 * compatibility.
 */
@Test
public void testRawPayloadMissingIfUnspecified() {
    final String RECORD_ID = "testRawPayloadMissingIfUnspecified";
    // A record with no payload set: neither serializer should emit "rawPayload".
    ZNRecord znRecord = new ZNRecord(RECORD_ID);
    ZNRecordSerializer znRecordSerializer = new ZNRecordSerializer();
    byte[] serialized = znRecordSerializer.serialize(znRecord);
    ZNRecordStreamingSerializer znRecordStreamingSerializer = new ZNRecordStreamingSerializer();
    byte[] streamingSerialized = znRecordStreamingSerializer.serialize(znRecord);
    ObjectMapper mapper = new ObjectMapper();
    try {
        // Decode with an explicit charset: new String(byte[]) uses the
        // platform-default charset, which is environment-dependent.
        JsonNode jsonNode = mapper.readTree(new String(serialized, java.nio.charset.StandardCharsets.UTF_8));
        Assert.assertFalse(jsonNode.has("rawPayload"));
        JsonNode streamingJsonNode = mapper.readTree(new String(streamingSerialized, java.nio.charset.StandardCharsets.UTF_8));
        Assert.assertFalse(streamingJsonNode.has("rawPayload"));
    } catch (IOException e) {
        // JsonProcessingException is a subclass of IOException, so one catch
        // covers both of the former branches (their bodies were identical).
        // Include the cause so a parse failure is diagnosable from the report.
        Assert.fail("Failed to parse serialized ZNRecord as JSON: " + e);
    }
}
Also used : ZNRecordStreamingSerializer(org.apache.helix.manager.zk.ZNRecordStreamingSerializer) JsonNode(org.codehaus.jackson.JsonNode) IOException(java.io.IOException) JsonProcessingException(org.codehaus.jackson.JsonProcessingException) ZNRecord(org.apache.helix.ZNRecord) ObjectMapper(org.codehaus.jackson.map.ObjectMapper) ZNRecordSerializer(org.apache.helix.manager.zk.ZNRecordSerializer) Test(org.testng.annotations.Test)

Example 3 with ZNRecordStreamingSerializer

Use of org.apache.helix.manager.zk.ZNRecordStreamingSerializer in the project helix by apache.

In the class TestZNRecordSizeLimit, the method testZNRecordSizeLimitUseZNRecordStreamingSerializer.

// Integration test against a live ZooKeeper (ZK_ADDR): writes records both
// below and above ZooKeeper's 1M znode limit through ZNRecordStreamingSerializer.
// The Assert.fail messages show oversized writes are expected to SUCCEED because
// the serializer applies compression. NOTE(review): depends on external ZK
// state; statement order matters — later reads verify earlier writes.
@Test
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
    String className = getShortClassName();
    System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
    ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
    ZkClient zkClient = new ZkClient(ZK_ADDR);
    zkClient.setZkSerializer(serializer);
    String root = className;
    // 1KB string of 'a' characters, used as the value of every simple field below.
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024; i++) {
        buf[i] = 'a';
    }
    String bufStr = new String(buf);
    // test zkClient
    // legal-sized data gets written to zk
    // write a znode of size less than 1m
    final ZNRecord smallRecord = new ZNRecord("normalsize");
    smallRecord.getSimpleFields().clear();
    // 900 fields x 1KB each => ~900KB raw payload, under the 1M limit
    for (int i = 0; i < 900; i++) {
        smallRecord.setSimpleField(i + "", bufStr);
    }
    String path1 = "/" + root + "/test1";
    zkClient.createPersistent(path1, true);
    zkClient.writeData(path1, smallRecord);
    ZNRecord record = zkClient.readData(path1);
    // round trip preserved the full ~900KB of simple-field data
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // oversized data doesn't create any data on zk
    // NOTE(review): stale comment — with the streaming serializer the write is
    // expected to succeed via compression, as asserted just below.
    // prepare a znode of size larger than 1m
    final ZNRecord largeRecord = new ZNRecord("oversize");
    largeRecord.getSimpleFields().clear();
    // 1024 fields x 1KB each => ~1MB raw payload, at/over the limit
    for (int i = 0; i < 1024; i++) {
        largeRecord.setSimpleField(i + "", bufStr);
    }
    String path2 = "/" + root + "/test2";
    zkClient.createPersistent(path2, true);
    try {
        zkClient.writeData(path2, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    record = zkClient.readData(path2);
    Assert.assertNotNull(record);
    // oversized write doesn't overwrite existing data on zk
    // NOTE(review): stale comment — the assertFalse below verifies the stored
    // record DID change after the oversized write.
    record = zkClient.readData(path1);
    try {
        zkClient.writeData(path1, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    ZNRecord recordNew = zkClient.readData(path1);
    byte[] arr = serializer.serialize(record);
    byte[] arrNew = serializer.serialize(recordNew);
    // the oversized write went through, so the stored record changed
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    // test ZkDataAccessor
    ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
    admin.addCluster(className, true);
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
    admin.addInstance(className, instanceConfig);
    // oversized data should not create any new data on zk
    // NOTE(review): stale comment — setProperty is asserted to succeed below.
    ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
    Builder keyBuilder = accessor.keyBuilder();
    // ZNRecord statusUpdates = new ZNRecord("statusUpdates");
    IdealState idealState = new IdealState("currentState");
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    // oversized ideal state (~1MB of simple fields)
    for (int i = 0; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
    Assert.assertTrue(succeed);
    HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
    Assert.assertNotNull(property);
    // legal sized data gets written to zk
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 900; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
    Assert.assertTrue(succeed);
    record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // oversized data should not update existing data on zk
    // NOTE(review): stale comment — updateProperty is asserted to succeed and
    // the stored record is asserted to differ afterwards.
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 900; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    // System.out.println("record: " + idealState.getRecord());
    succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
    Assert.assertTrue(succeed);
    recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
    arr = serializer.serialize(record);
    arrNew = serializer.serialize(recordNew);
    // the update took effect, so serialized forms must differ
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) ZkBaseDataAccessor(org.apache.helix.manager.zk.ZkBaseDataAccessor) ZNRecordStreamingSerializer(org.apache.helix.manager.zk.ZNRecordStreamingSerializer) Builder(org.apache.helix.PropertyKey.Builder) Date(java.util.Date) IdealState(org.apache.helix.model.IdealState) HelixException(org.apache.helix.HelixException) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) InstanceConfig(org.apache.helix.model.InstanceConfig) HelixProperty(org.apache.helix.HelixProperty) ZNRecord(org.apache.helix.ZNRecord) ZKHelixDataAccessor(org.apache.helix.manager.zk.ZKHelixDataAccessor) Test(org.testng.annotations.Test)

Aggregations

ZNRecord (org.apache.helix.ZNRecord)3 ZNRecordStreamingSerializer (org.apache.helix.manager.zk.ZNRecordStreamingSerializer)3 Test (org.testng.annotations.Test)3 IOException (java.io.IOException)1 Date (java.util.Date)1 HelixException (org.apache.helix.HelixException)1 HelixProperty (org.apache.helix.HelixProperty)1 Builder (org.apache.helix.PropertyKey.Builder)1 ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin)1 ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor)1 ZNRecordSerializer (org.apache.helix.manager.zk.ZNRecordSerializer)1 ZkBaseDataAccessor (org.apache.helix.manager.zk.ZkBaseDataAccessor)1 ZkClient (org.apache.helix.manager.zk.ZkClient)1 IdealState (org.apache.helix.model.IdealState)1 InstanceConfig (org.apache.helix.model.InstanceConfig)1 JsonNode (org.codehaus.jackson.JsonNode)1 JsonProcessingException (org.codehaus.jackson.JsonProcessingException)1 ObjectMapper (org.codehaus.jackson.map.ObjectMapper)1