After Spark Streaming finishes analyzing the data and tries to send the results to Kafka, it fails with the error below:

2017-05-04 13:03:35,105 [Executor task launch worker-0] ERROR [org.apache.spark.executor.Executor] 96 - Exception in task 0.0 in stage 59.0 (TID 52)
org.apache.kafka.common.config.ConfigException: Missing required configuration "partition.assignment.strategy" which has no default value.
at org.apache.kafka.common.config.ConfigDef.parse(ConfigDef.java:124)
at org.apache.kafka.common.config.AbstractConfig.<init>(AbstractConfig.java:48)
at org.apache.kafka.clients.consumer.ConsumerConfig.<init>(ConsumerConfig.java:194)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:430)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:413)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:400)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getConfigConsumer(ConfigHelper.java:78)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getConfig(ConfigHelper.java:35)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getProperties(ConfigHelper.java:94)
at com.unistacks.tamboo.kafkaclient.tools.ConfigHelper.getProducerProperties(ConfigHelper.java:105)
at com.unistacks.tamboo.kafkaclient.producer.KafkaProducerFactory.getProducerCommon(KafkaProducerFactory.java:33)
at com.unistacks.tamboo.kafkaclient.producer.KafkaProducerFactory.getProducer(KafkaProducerFactory.java:17)
at com.bigdata.spark.utils.KafkaClient.<init>(KafkaClient.java:30)
at com.bigdata.spark.utils.KafkaFactory$$anonfun$getOrCreateProducer$1.apply(KafkaFactory.scala:24)
at com.bigdata.spark.utils.KafkaFactory$$anonfun$getOrCreateProducer$1.apply(KafkaFactory.scala:22)
at scala.collection.mutable.MapLike$class.getOrElseUpdate(MapLike.scala:189)
at scala.collection.mutable.AbstractMap.getOrElseUpdate(Map.scala:91)
at com.bigdata.spark.utils.KafkaFactory$.getOrCreateProducer(KafkaFactory.scala:22)
at com.bigdata.spark.DirectKafkaWordCount$$anonfun$main$2$$anonfun$apply$1.apply(DirectKafkaWordCount.scala:131)
at com.bigdata.spark.DirectKafkaWordCount$$anonfun$main$2$$anonfun$apply$1.apply(DirectKafkaWordCount.scala:130)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:878)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:878)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1765)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1765)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:63)
at org.apache.spark.scheduler.Task.run(Task.scala:70)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
2017-05-04 13:03:35,106 [task-result-getter-0] WARN  [org.apache.spark.scheduler.TaskSetManager] 71 - Lost task 0.0 in stage 59.0 (TID 52, localhost): org.apache.kafka.common.config.ConfigException: Missing required configuration "partition.assignment.strategy" which has no default value.
(stack trace identical to the one above)
2017-05-04 13:03:35,110 [task-result-getter-0] ERROR [org.apache.spark.scheduler.TaskSetManager] 75 - Task 0 in stage 59.0 failed 1 times; aborting job

How can this be fixed?
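From the trace, the exception is thrown while the Kafka client library parses a ConsumerConfig, and this particular message appears characteristic of the 0.8.2-era kafka-clients jar, where partition.assignment.strategy is declared required with no default (0.9+ clients default it to RangeAssignor). Below is a minimal sketch of what I understand the workaround to be: set the key explicitly in the properties that end up in the KafkaConsumer. The broker address, group id, and class name are placeholders, not values from my setup.

```java
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholders -- substitute the real cluster settings:
        props.put("bootstrap.servers", "kafka-broker:9092");
        props.put("group.id", "spark-streaming-demo");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        // The key the ConfigException complains about. The 0.8.2-era client
        // declares it required with no default; "range" (or "roundrobin")
        // satisfies the check. On 0.9+ clients this line is unnecessary.
        props.put("partition.assignment.strategy", "range");

        // With the key present, ConsumerConfig parsing should no longer throw
        // the "Missing required configuration" exception.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.close();
    }
}
```

Is setting the key like this the right direction, or does the error instead mean an old 0.8.2 kafka-clients jar is being picked up on the executor classpath, so the jar version should be aligned instead?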