Support for JSONConverter in sink connector #71
Changes from all commits: f03cd16, 659c761, 734a928, a426313, 107b0e3, 1e7ab83, ec2587f, 3fdee9d, da856f7, 5583155, 0231864, ac8b55d, 7585eb7, 35f30ce, 8e5e88b
JavaCloudantUtil.java

@@ -15,30 +15,24 @@
 import com.ibm.cloud.cloudant.internal.ServiceFactory;
 import com.ibm.cloud.cloudant.v1.Cloudant;
-import com.ibm.cloud.cloudant.v1.model.BulkDocs;
-import com.ibm.cloud.cloudant.v1.model.Document;
-import com.ibm.cloud.cloudant.v1.model.DocumentResult;
-import com.ibm.cloud.cloudant.v1.model.PostBulkDocsOptions;
-import com.ibm.cloud.cloudant.v1.model.PutDatabaseOptions;
+import com.ibm.cloud.cloudant.v1.model.*;
 import com.ibm.cloud.sdk.core.service.exception.ServiceResponseException;
 import com.ibm.cloudant.kafka.common.CloudantConst;
 import com.ibm.cloudant.kafka.common.InterfaceConst;
 import com.ibm.cloudant.kafka.common.MessageKey;

-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.io.IOException;
 import java.io.InputStream;
 import java.net.MalformedURLException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Properties;
 import java.util.stream.Collectors;

 public class JavaCloudantUtil {
@@ -67,7 +61,7 @@ public class JavaCloudantUtil {
         );
     }

-    public static JSONArray batchWrite(Map<String, String> props, JSONArray data)
+    public static JSONArray batchWrite(Map<String, String> props, List<Map<String, Object>> data)
             throws JSONException {
         // wrap result to JSONArray
         JSONArray result = new JSONArray();
@@ -76,13 +70,7 @@ public static JSONArray batchWrite(Map<String, String> props, JSONArray data)
         // get client object
         Cloudant service = getClientInstance(props);

-        List<Document> listOfDocs = new ArrayList<>();
-        for (int i = 0; i < data.length(); i++) {
-            Map<String, Object> docProperties = data.getJSONObject(i).toMap();
-            Document doc = new Document();
-            doc.setProperties(docProperties);
-            listOfDocs.add(doc);
-        }
+        List<Document> listOfDocs = data.stream().map(d -> { Document doc = new Document(); doc.setProperties(d); return doc; }).collect(Collectors.toList());

         // attempt to create database
        createTargetDb(service, props.get(InterfaceConst.DB));
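With the new signature, converting Kafka Connect records into plain maps now happens before batchWrite is called, in the sink task. A minimal sketch of how that wiring might look; the class, method shape, and props handling here are assumptions, and only ConnectRecordMapper and batchWrite come from this PR:

import com.ibm.cloudant.kafka.schema.ConnectRecordMapper;
import org.apache.kafka.connect.sink.SinkRecord;
import org.json.JSONException;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Hypothetical sink-task wiring, not code from this PR
public class ExampleSinkTask {

    private final ConnectRecordMapper<SinkRecord> mapper = new ConnectRecordMapper<>();
    private Map<String, String> props; // connector config, assumed populated in start()

    public void put(Collection<SinkRecord> records) throws JSONException {
        // convert each record's value (Struct or Map) to a plain Map
        // using the ConnectRecordMapper added later in this PR
        List<Map<String, Object>> docs = records.stream()
                .map(mapper)
                .collect(Collectors.toList());
        // hand the whole batch to the bulk writer
        // (import for JavaCloudantUtil omitted: its package is not shown in this diff)
        JavaCloudantUtil.batchWrite(props, docs);
    }
}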
@@ -116,6 +104,7 @@ public static JSONArray batchWrite(Map<String, String> props, JSONArray data)
                 result.put(jsonResult);
             }
         } catch (Exception e) {
+            LOG.error("Exception caught in batchWrite()", e);
             if (e.getMessage().equals(String.format(ResourceBundleUtil.get(
                     MessageKey.CLOUDANT_LIMITATION)))) {
                 // try to put items from jsonResult before exception occurred

Review thread on the added LOG.error line:

- This may need to be revisited in another PR. The worrying thing is that we were just swallowing exceptions from the Cloudant client, which I had managed to trigger with a misconfigured test.
- Agreed, there needs to be a separate look at error handling to conform to the behaviours of the built-in …
- Should we open a ticket specifically for investigating and improving error handling?
- I made a note in my error handling epic. Strictly speaking, I think we should iterate the result and push each failed document/message to the DLQ (or whatever error handling is configured), but I'm OK with us improving that later.
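For context on the DLQ suggestion: Kafka Connect has supported routing failed records to a dead letter queue since Kafka 2.0 (KIP-298) via standard connector properties. A sketch, with an illustrative topic name:

errors.tolerance=all
errors.deadletterqueue.topic.name=cloudant-sink-dlq
errors.deadletterqueue.context.headers.enable=true
errors.log.enable=true
errors.log.include.messages=true

Note these properties cover failures in the converter and transform stages; reporting individual documents that fail inside the task's put() itself, as suggested above, would need the ErrantRecordReporter API added in Kafka 2.6 (KIP-610).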
ConnectRecordMapper.java (new file)

@@ -0,0 +1,103 @@
package com.ibm.cloudant.kafka.schema;

import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Schema.Type;
import org.apache.kafka.connect.data.Struct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class ConnectRecordMapper<R extends ConnectRecord<R>> implements Function<ConnectRecord<R>, Map<String, Object>> {

    private static Logger LOG = LoggerFactory.getLogger(ConnectRecordMapper.class);

    public Map<String, Object> apply(ConnectRecord<R> record) {
        // we can convert from a struct or a map - assume a map when a value schema is not provided
        Schema.Type schemaType = record.valueSchema() == null ? Schema.Type.MAP : record.valueSchema().type();
        Map<String, Object> toReturn = new HashMap<>();
        switch (schemaType) {
            case MAP:
                if (record.value() instanceof Map) {
                    return convertMap((Map) record.value(), toReturn);
                } else {
                    throw new IllegalArgumentException(String.format("Type %s not supported with schema of type Map (or no schema)",
                            record.value().getClass()));
                }
            case STRUCT:
                if (record.value() instanceof Struct) {
                    return convertStruct((Struct) record.value(), toReturn);
                } else {
                    throw new IllegalArgumentException(String.format("Type %s not supported with schema of type Struct",
                            record.value().getClass()));
                }
            default:
                throw new IllegalArgumentException(String.format("Schema type %s not supported", record.valueSchema().type()));
        }
    }

    // convert struct to map by adding key/values to passed in map, and returning it
    private Map<String, Object> convertStruct(Struct struct, Map<String, Object> outMap) {
        Schema schema = struct.schema();

        // iterate fields and add to map
        for (Field f : schema.fields()) {
            Object value = struct.get(f);
            outMap.put(f.name(), getField(f.schema().type(), value));
        }
        return outMap;
    }

    // convert kafka map to map by adding key/values to passed in map, and returning it
    private Map<String, Object> convertMap(Map inMap, Map<String, Object> outMap) {
        for (Object k : inMap.keySet()) {
            if (k instanceof String) {
                Object v = inMap.get(k);
                if (v instanceof Map) {
                    outMap.put((String) k, convertMap((Map) v, new HashMap<>()));
                } else if (v instanceof Struct) {
                    outMap.put((String) k, convertStruct((Struct) v, new HashMap<>()));
                } else {
                    // assume that JSON serialiser knows how to deal with it
                    outMap.put((String) k, v);
                }
            } else {
                throw new IllegalArgumentException("unsupported type in map key " + k.getClass());
            }
        }
        return outMap;
    }

    // get field value, recursing if necessary for struct types
    private Object getField(Type type, Object value) {
        switch (type) {
            // primitive types: just return value (JSON serialiser will deal with conversion later)
            case ARRAY:
            case BOOLEAN:
            case BYTES:
            case FLOAT32:
            case FLOAT64:
            case INT16:
            case INT32:
            case INT64:
            case INT8:
            case STRING:
                return value;
            // map/struct cases: chain a new map onto this one, as the value, and recursively fill in its contents
            case MAP:
                return convertMap((Map) value, new HashMap<>());
            case STRUCT:
                return convertStruct((Struct) value, new HashMap<>());
            default:
                throw new IllegalArgumentException("unknown type " + type);
        }
    }
}
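To illustrate what the mapper produces, a small sketch converting a Struct-backed SinkRecord with a nested struct; the topic, field names, and values are made up, and the Connect API calls are the standard ones:

import com.ibm.cloudant.kafka.schema.ConnectRecordMapper;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

import java.util.Map;

public class ConnectRecordMapperExample {
    public static void main(String[] args) {
        // nested struct schema: {"name": ..., "address": {"city": ...}}
        Schema addressSchema = SchemaBuilder.struct()
                .field("city", Schema.STRING_SCHEMA)
                .build();
        Schema valueSchema = SchemaBuilder.struct()
                .field("name", Schema.STRING_SCHEMA)
                .field("address", addressSchema)
                .build();

        Struct value = new Struct(valueSchema)
                .put("name", "example")
                .put("address", new Struct(addressSchema).put("city", "Bristol"));

        // topic, partition, and offset are arbitrary for this illustration
        SinkRecord record = new SinkRecord("example-topic", 0, null, null, valueSchema, value, 0L);

        // convertStruct recurses into the nested struct, so the result is
        // {name=example, address={city=Bristol}} - ready for Document.setProperties
        Map<String, Object> doc = new ConnectRecordMapper<SinkRecord>().apply(record);
        System.out.println(doc);
    }
}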
Review comment on ConnectRecordMapper.java:

- The source connector converter needs covering when we do the PR for that work. My intention is that we support JsonConverter on source and sink, which simplifies things (as mentioned above, it's the default anyway, so there's no need to explicitly set it in config).
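As that comment notes, JsonConverter is the default in stock Connect worker configs, so nothing extra is strictly required. For readers who do set it explicitly, a sketch of the sink-side properties; schemas.enable=false assumes plain JSON values without the schema/payload envelope:

value.converter=org.apache.kafka.connect.json.JsonConverter
value.converter.schemas.enable=false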