-
Notifications
You must be signed in to change notification settings - Fork 70
Description
Hi,
Does the AmazonS3MoveCleanupPolicy ("fs.cleanup.policy.move.failure.aws.bucket.name") configuration support storing the entire file to the error path when an issue occurs, or does it store only the problematic records?
"name": "s3_file_pulse_connector",
"config": {
"connector class": "io. streamthoughts.kafka.connect.filepulse.source.FilePulseSourceConnector",
"topic": "",
"tasks.max": "1",
"tasks. reader.class": "io.streamthoughts.kafka.connect.filepulse.fs.reader.AmazonS3ROwFileInputReader",
"fs.listing.class": "io.streamthoughts.kafka.connect.filepulse.fs.AmazonS3FileSystemListing",
"aws. s3. bucket. name": "
"aws. s3. bucket-prefix": "”
"aws. s3.region": "",
"fs. cleanup policy.class": "io.streamthoughts.kafka.connect.filepulse.fs.clean.AmazonS3MoveCleanupPolicy",
"fs. cleanup-policy.move. success.aws.bucket.name": "”
"fs.cleanup.policy.move.success.aws.prefix.path": "”
"fs. cleanup.policy-move.failure.aws.bucket.name": "”
"fs.cleanup.policy.move.failure.aws.prefix.path": “",
"tasks. file.status.storage.bootstrap.servers": “”
"tasks. file.status.storage.topic":
"tasks. file.status.storage.topic.partitions":10,
"tasks. file.status.storage.topic.replication. factor":1,
"errors. log. include messages": "true",
"errors. log enable": "true",
"key, converter": "org. apache. kafka.connect. storage. StringConverter",
"value. converter": "org,apache.kafka.connect.storage.StringConverter"
}