package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"os"
	"strings"
	"time"

	"github.com/Shopify/sarama"
)
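
// produceCmd holds the flag values and derived state for the produce
// subcommand, which reads messages from standard input and publishes
// them to a Kafka topic.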
type produceCmd struct {
	commonFlags

	topic           string
	batch           int
	timeout         time.Duration
	pretty          bool
	literal         bool
	keyCodecType    string
	valueCodecType  string
	compressionType string
	partitionerType string
	maxLineLen      int

	partitioner sarama.PartitionerConstructor
	compression sarama.CompressionCodec
	decodeKey   func(json.RawMessage) ([]byte, error)
	decodeValue func(json.RawMessage) ([]byte, error)
	leaders     map[int32]*sarama.Broker
}

// producerMessage defines the format of messages
// accepted on the produce command's standard input.
type producerMessage struct {
	Value     json.RawMessage `json:"value"`
	Key       json.RawMessage `json:"key"`
	Partition *int32          `json:"partition"`
	Timestamp *time.Time      `json:"time"`
}
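
// partitioners maps the accepted -partitioner flag values to the
// corresponding sarama partitioner constructors.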
var partitioners = map[string]func(topic string) sarama.Partitioner{
	"sarama":     sarama.NewHashPartitioner,
	"std":        sarama.NewReferenceHashPartitioner,
	"random":     sarama.NewRandomPartitioner,
	"roundrobin": sarama.NewRoundRobinPartitioner,
}
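
// compressionTypes maps the accepted -compression flag values to the
// corresponding sarama compression codecs.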
var compressionTypes = map[string]sarama.CompressionCodec{
	"none":   sarama.CompressionNone,
	"gzip":   sarama.CompressionGZIP,
	"snappy": sarama.CompressionSnappy,
	"lz4":    sarama.CompressionLZ4,
	"zstd":   sarama.CompressionZSTD,
}
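
// addFlags registers the produce command's flags on the given flag set.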
func (cmd *produceCmd) addFlags(flags *flag.FlagSet) {
	cmd.commonFlags.addFlags(flags)
	flags.StringVar(&cmd.topic, "topic", "", "Topic to produce to (required).")
	flags.IntVar(&cmd.batch, "batch", 1, "Max size of a batch before sending it off.")
	flags.DurationVar(&cmd.timeout, "timeout", 50*time.Millisecond, "Duration to wait for batch to be filled before sending it off.")
	flags.BoolVar(&cmd.pretty, "pretty", true, "Control output pretty printing.")
	flags.BoolVar(&cmd.literal, "literal", false, "Interpret stdin line literally and pass it as value, key as null.")
	flags.StringVar(&cmd.compressionType, "compression", "none", "Kafka message compression codec (none|gzip|snappy|lz4|zstd).")
	flags.StringVar(&cmd.partitionerType, "partitioner", "sarama", "Optional partitioner to use. Available: sarama, std, random, roundrobin.")
	flags.StringVar(&cmd.keyCodecType, "keycodec", "string", "Interpret message key as (string|hex|base64), defaults to string.")
	flags.StringVar(&cmd.valueCodecType, "valuecodec", "json", "Interpret message value as (json|string|hex|base64), defaults to json.")
	flags.IntVar(&cmd.maxLineLen, "maxline", 16*1024*1024, "Maximum length of input line.")
	flags.Usage = func() {
		fmt.Fprintln(os.Stderr, "Usage of produce:")
		flags.PrintDefaults()
		fmt.Fprintln(os.Stderr, produceDocString)
	}
}
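
// environFlags maps flag names to the environment variables
// that can supply their values.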
func (cmd *produceCmd) environFlags() map[string]string {
	return map[string]string{
		"brokers": "KT_BROKERS",
	}
}
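
// run validates the flag values, wires up the input-reading pipeline
// and produces messages until stdin is exhausted or an interrupt arrives.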
func (cmd *produceCmd) run(args []string) error {
	partitioner, ok := partitioners[cmd.partitionerType]
	if !ok {
		return fmt.Errorf("unrecognised -partitioner argument %q", cmd.partitionerType)
	}
	cmd.partitioner = partitioner
	var err error
	cmd.decodeValue, err = decoderForType(cmd.valueCodecType)
	if err != nil {
		return fmt.Errorf("bad -valuecodec argument: %v", err)
	}
	if cmd.keyCodecType == "json" {
		// JSON for keys is not a good idea.
		return fmt.Errorf("JSON key codec not supported")
	}
	cmd.decodeKey, err = decoderForType(cmd.keyCodecType)
	if err != nil {
		return fmt.Errorf("bad -keycodec argument: %v", err)
	}
	if cmd.verbose {
		sarama.Logger = log.New(os.Stderr, "", log.LstdFlags)
	}
	compression, ok := compressionTypes[cmd.compressionType]
	if !ok {
		return fmt.Errorf("unsupported -compression codec %#v - supported: none, gzip, snappy, lz4, zstd", cmd.compressionType)
	}
	cmd.compression = compression

	stdin := make(chan string)
	lines := make(chan string)
	messages := make(chan producerMessage)
	go readStdinLines(cmd.maxLineLen, stdin)
	q := make(chan struct{})
	go listenForInterrupt(q)
	go cmd.readInput(q, stdin, lines)
	go cmd.deserializeLines(lines, messages, int32(len(cmd.leaders)))
	return cmd.produce(messages)
}
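
// deserializeLines converts raw input lines into producerMessage values.
// In -literal mode each line becomes the message value verbatim; otherwise
// each line is parsed as a JSON object, and lines that do not parse are
// skipped with a warning.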
func (cmd *produceCmd) deserializeLines(in chan string, out chan producerMessage, partitionCount int32) {
	defer close(out)
	for l := range in {
		var msg producerMessage
		if cmd.literal {
			data, _ := json.Marshal(l) // Marshaling a string can't fail.
			msg.Value = json.RawMessage(data)
			// TODO allow specifying a key?
		} else {
			if l = strings.TrimSpace(l); l == "" {
				// Ignore blank line.
				continue
			}
			if err := json.Unmarshal([]byte(l), &msg); err != nil {
				warningf("skipping invalid JSON input %q: %v", l, err)
				continue
			}
		}
		out <- msg
	}
}
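
// makeSaramaMessage translates a decoded input message into a
// sarama.ProducerMessage, applying the configured key and value codecs.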
func (cmd *produceCmd) makeSaramaMessage(msg producerMessage) (*sarama.ProducerMessage, error) {
	sm := &sarama.ProducerMessage{
		Topic: cmd.topic,
	}
	if msg.Partition != nil {
		// This is a hack to get the manual partition through
		// to the partitioner, because we don't know whether
		// all messages being produced will specify a partition.
		sm.Metadata = *msg.Partition
	}
	if msg.Key != nil {
		key, err := cmd.decodeKey(msg.Key)
		if err != nil {
			return nil, fmt.Errorf("cannot decode key: %v", err)
		}
		sm.Key = sarama.ByteEncoder(key)
	}
	if msg.Value != nil {
		value, err := cmd.decodeValue(msg.Value)
		if err != nil {
			return nil, fmt.Errorf("cannot decode value: %v", err)
		}
		sm.Value = sarama.ByteEncoder(value)
	}
	if msg.Timestamp != nil {
		sm.Timestamp = *msg.Timestamp
	}
	return sm, nil
}
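
// produce sends the incoming messages to Kafka with an async producer,
// collecting any per-message errors and reporting them when done.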
func (cmd *produceCmd) produce(in chan producerMessage) error {
	cfg, err := cmd.saramaConfig("produce")
	if err != nil {
		return err
	}
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Compression = cmd.compression
	cfg.Producer.Partitioner = func(topic string) sarama.Partitioner {
		return producerPartitioner{cmd.partitioner(topic)}
	}
	producer, err := sarama.NewAsyncProducer(cmd.brokers(), cfg)
	if err != nil {
		return err
	}
	go func() {
		// Note: if there are producer errors, this goroutine
		// will be left hanging around, but we don't care much.
		input := producer.Input()
		for m := range in {
			sm, err := cmd.makeSaramaMessage(m)
			if err != nil {
				warningf("invalid message: %v", err)
				continue
			}
			input <- sm
		}
		producer.AsyncClose()
	}()
	// We need to keep reading from the producer's errors channel
	// until it is closed.
	var errors sarama.ProducerErrors
	for err := range producer.Errors() {
		errors = append(errors, err)
	}
	if len(errors) > 0 {
		for _, err := range errors {
			warningf("error producing message: %v", err.Err)
		}
		return errors
	}
	return nil
}
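
// readInput forwards lines from stdin to out until stdin is closed
// or an interrupt is signalled on q.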
func (cmd *produceCmd) readInput(q chan struct{}, stdin chan string, out chan string) {
	defer close(out)
	for {
		select {
		case l, ok := <-stdin:
			if !ok {
				return
			}
			out <- l
		case <-q:
			return
		}
	}
}
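
// producerPartitioner wraps a user-selected sarama.Partitioner so that
// messages carrying an explicit partition (stashed in Metadata by
// makeSaramaMessage) bypass the wrapped partitioning logic.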
type producerPartitioner struct {
	sarama.Partitioner
}

// Partition implements sarama.Partitioner.Partition by returning the partition
// specified in the input message if it was present, or using the underlying
// user-specified partitioner if not.
func (p producerPartitioner) Partition(m *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	if partition, ok := m.Metadata.(int32); ok {
		return partition, nil
	}
	return p.Partitioner.Partition(m, numPartitions)
}

// MessageRequiresConsistency implements sarama.DynamicConsistencyPartitioner.MessageRequiresConsistency
// by querying the underlying partitioner.
func (p producerPartitioner) MessageRequiresConsistency(m *sarama.ProducerMessage) bool {
	if p1, ok := p.Partitioner.(sarama.DynamicConsistencyPartitioner); ok {
		return p1.MessageRequiresConsistency(m)
	}
	return p.RequiresConsistency()
}

var produceDocString = `
The value for -brokers can also be set with the environment variable KT_BROKERS.
The value supplied on the command line takes precedence over the environment variable.

Input is read from stdin and separated by newlines.

If you want to use the -partitioner flag, keep in mind that the hashCode
implementation is no longer the default for Kafka's producer.

To specify the key, value and partition individually, pass them as a JSON object
like the following:

	{"key": "id-23", "value": "message content", "partition": 0}

In case the input line cannot be interpreted as a JSON object, the key and value
both default to the input line and the partition to 0.

Examples:

Send a single message with a specific key:

	$ echo '{"key": "id-23", "value": "ola", "partition": 0}' | hkt produce -topic greetings
	Sent message to partition 0 at offset 3.

	$ hkt consume -topic greetings -timeout 1s -offsets 0:3-
	{"partition":0,"offset":3,"key":"id-23","message":"ola"}

Keep reading input from stdin until interrupted (via ^C):

	$ hkt produce -topic greetings
	hello.
	Sent message to partition 0 at offset 4.
	bonjour.
	Sent message to partition 0 at offset 5.

	$ hkt consume -topic greetings -timeout 1s -offsets 0:4-
	{"partition":0,"offset":4,"key":"hello.","message":"hello."}
	{"partition":0,"offset":5,"key":"bonjour.","message":"bonjour."}
`