diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5457634..d168217 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,7 +3,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## 2.2.0 (4/24/2018)
-- [Issue-5](https://github.com/salesforce/kafka-junit/issues/5) Updated to support Kafka versions 1.0.x and 1.1.x. Thanks [kasuri](https://github.com/kasuri)!
+- [Issue-5](https://github.com/salesforce/kafka-junit/issues/5) Updated to support Kafka versions 1.0.x and 1.1.x. Thanks [kasuri](https://github.com/kasuri)!
+- [Issue-4](https://github.com/salesforce/kafka-junit/issues/4) Fix server configuration to allow for transactional producers & consumers.
### Breaking Change
This library now requires you to provide which version of Kafka you want to use.
diff --git a/kafka-junit-core/pom.xml b/kafka-junit-core/pom.xml
index 5d8e3e7..3a0979d 100644
--- a/kafka-junit-core/pom.xml
+++ b/kafka-junit-core/pom.xml
@@ -11,4 +11,16 @@
kafka-junit-core2.2.0
+
+
+
+
+
+
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-api</artifactId>
+ <version>5.1.1</version>
+ <scope>test</scope>
+
+
\ No newline at end of file
diff --git a/kafka-junit-core/src/main/java/com/salesforce/kafka/test/KafkaTestServer.java b/kafka-junit-core/src/main/java/com/salesforce/kafka/test/KafkaTestServer.java
index baa556a..2c7dec8 100644
--- a/kafka-junit-core/src/main/java/com/salesforce/kafka/test/KafkaTestServer.java
+++ b/kafka-junit-core/src/main/java/com/salesforce/kafka/test/KafkaTestServer.java
@@ -43,6 +43,7 @@
import java.io.File;
import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
@@ -161,6 +162,8 @@ public void start() throws Exception {
kafkaProperties.setProperty("offsets.topic.replication.factor", "1");
kafkaProperties.setProperty("offset.storage.replication.factor", "1");
kafkaProperties.setProperty("transaction.state.log.replication.factor", "1");
+ kafkaProperties.setProperty("transaction.state.log.min.isr", "1");
+ kafkaProperties.setProperty("transaction.state.log.num.partitions", "4");
kafkaProperties.setProperty("config.storage.replication.factor", "1");
kafkaProperties.setProperty("status.storage.replication.factor", "1");
kafkaProperties.setProperty("default.replication.factor", "1");
@@ -228,15 +231,39 @@ public KafkaProducer getKafkaProducer(
final Class<? extends Serializer<K>> keySerializer,
final Class<? extends Serializer<V>> valueSerializer) {
+ return getKafkaProducer(keySerializer, valueSerializer, new Properties());
+ }
+
+ /**
+ * Creates a kafka producer that is connected to our test server.
+ * @param <K> Type of message key
+ * @param <V> Type of message value
+ * @param keySerializer Class of serializer to be used for keys.
+ * @param valueSerializer Class of serializer to be used for values.
+ * @param config Additional producer configuration options to be set.
+ * @return KafkaProducer configured to produce into Test server.
+ */
+ public <K, V> KafkaProducer<K, V> getKafkaProducer(
+ final Class<? extends Serializer<K>> keySerializer,
+ final Class<? extends Serializer<V>> valueSerializer,
+ final Properties config) {
+
// Build config
- final Map<String, Object> kafkaProducerConfig = Maps.newHashMap();
+ final Map<String, Object> kafkaProducerConfig = new HashMap<>();
kafkaProducerConfig.put("bootstrap.servers", getKafkaConnectString());
- kafkaProducerConfig.put("key.serializer", keySerializer);
- kafkaProducerConfig.put("value.serializer", valueSerializer);
kafkaProducerConfig.put("max.in.flight.requests.per.connection", 1);
kafkaProducerConfig.put("retries", 5);
kafkaProducerConfig.put("client.id", getClass().getSimpleName() + " Producer");
kafkaProducerConfig.put("batch.size", 0);
+ kafkaProducerConfig.put("key.serializer", keySerializer);
+ kafkaProducerConfig.put("value.serializer", valueSerializer);
+
+ // Override config
+ if (config != null) {
+ for (Map.Entry