1 change: 1 addition & 0 deletions cmd/ingester/app/builder/builder.go
@@ -58,6 +58,7 @@ func CreateConsumer(logger *zap.Logger, metricsFactory metrics.Factory, spanWrit
ClientID: options.ClientID,
ProtocolVersion: options.ProtocolVersion,
AuthenticationConfig: options.AuthenticationConfig,
FetchMaxMessageBytes: options.FetchMaxMessageBytes,
}
saramaConsumer, err := consumerConfig.NewConsumer(logger)
if err != nil {
9 changes: 9 additions & 0 deletions cmd/ingester/app/flags.go
@@ -39,6 +39,8 @@ const (
SuffixTopic = ".topic"
// SuffixRackID is a suffix for the consumer rack-id flag
SuffixRackID = ".rack-id"
// SuffixFetchMaxMessageBytes is a suffix for the consumer fetch-max-message-bytes flag
SuffixFetchMaxMessageBytes = ".fetch-max-message-bytes"
// SuffixGroupID is a suffix for the group-id flag
SuffixGroupID = ".group-id"
// SuffixClientID is a suffix for the client-id flag
@@ -67,6 +69,8 @@ const (
DefaultEncoding = kafka.EncodingProto
// DefaultDeadlockInterval is the default deadlock interval
DefaultDeadlockInterval = time.Duration(0)
// DefaultFetchMaxMessageBytes is the default for kafka.consumer.fetch-max-message-bytes flag
DefaultFetchMaxMessageBytes = 1024 * 1024 // 1MB
)

// Options stores the configuration options for the Ingester
@@ -117,6 +121,10 @@ func AddFlags(flagSet *flag.FlagSet) {
KafkaConsumerConfigPrefix+SuffixRackID,
"",
"Rack identifier for this client. This can be any string value which indicates where this client is located. It corresponds with the broker config `broker.rack`")
flagSet.Int(
KafkaConsumerConfigPrefix+SuffixFetchMaxMessageBytes,
DefaultFetchMaxMessageBytes,
"The maximum number of message bytes to fetch from the broker in a single request. This must be at least as large as your largest message.")

auth.AddFlags(KafkaConsumerConfigPrefix, flagSet)
}
@@ -130,6 +138,7 @@ func (o *Options) InitFromViper(v *viper.Viper) {
o.ProtocolVersion = v.GetString(KafkaConsumerConfigPrefix + SuffixProtocolVersion)
o.Encoding = v.GetString(KafkaConsumerConfigPrefix + SuffixEncoding)
o.RackID = v.GetString(KafkaConsumerConfigPrefix + SuffixRackID)
o.FetchMaxMessageBytes = v.GetInt32(KafkaConsumerConfigPrefix + SuffixFetchMaxMessageBytes)

o.Parallelism = v.GetInt(ConfigPrefix + SuffixParallelism)
o.DeadlockInterval = v.GetDuration(ConfigPrefix + SuffixDeadlockInterval)
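The new flag follows the same flag/viper pattern as the existing Kafka consumer options: it is registered with a 1MB default and read back as an int32 in InitFromViper. Below is a minimal, self-contained sketch of that pattern. It uses spf13/pflag and viper directly rather than Jaeger's own config plumbing, so the wiring is illustrative only, not the actual ingester code.

package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	// Register an integer flag with a 1MB default, mirroring the AddFlags change above.
	fs := pflag.NewFlagSet("ingester", pflag.ExitOnError)
	fs.Int("kafka.consumer.fetch-max-message-bytes", 1024*1024,
		"The maximum number of message bytes to fetch from the broker in a single request")

	// Bind the flag set to viper so the value can be read back by key.
	v := viper.New()
	_ = v.BindPFlags(fs)

	// Simulate the command line used in the test above.
	_ = fs.Parse([]string{"--kafka.consumer.fetch-max-message-bytes=10485760"})

	// GetInt32 mirrors the InitFromViper line in the diff; prints 10485760.
	fmt.Println(v.GetInt32("kafka.consumer.fetch-max-message-bytes"))
}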
3 changes: 3 additions & 0 deletions cmd/ingester/app/flags_test.go
@@ -38,6 +38,7 @@ func TestOptionsWithFlags(t *testing.T) {
"--kafka.consumer.group-id=group1",
"--kafka.consumer.client-id=client-id1",
"--kafka.consumer.rack-id=rack1",
"--kafka.consumer.fetch-max-message-bytes=10485760",
"--kafka.consumer.encoding=json",
"--kafka.consumer.protocol-version=1.0.0",
"--ingester.parallelism=5",
@@ -49,6 +50,7 @@
assert.Equal(t, []string{"127.0.0.1:9092", "0.0.0:1234"}, o.Brokers)
assert.Equal(t, "group1", o.GroupID)
assert.Equal(t, "rack1", o.RackID)
assert.Equal(t, int32(10485760), o.FetchMaxMessageBytes)
assert.Equal(t, "client-id1", o.ClientID)
assert.Equal(t, "1.0.0", o.ProtocolVersion)
assert.Equal(t, 5, o.Parallelism)
@@ -108,6 +110,7 @@ func TestFlagDefaults(t *testing.T) {
assert.Equal(t, DefaultGroupID, o.GroupID)
assert.Equal(t, DefaultClientID, o.ClientID)
assert.Equal(t, DefaultParallelism, o.Parallelism)
assert.Equal(t, int32(DefaultFetchMaxMessageBytes), o.FetchMaxMessageBytes)
assert.Equal(t, DefaultEncoding, o.Encoding)
assert.Equal(t, DefaultDeadlockInterval, o.DeadlockInterval)
}
16 changes: 9 additions & 7 deletions pkg/kafka/consumer/config.go
@@ -42,13 +42,14 @@ type Configuration struct {
auth.AuthenticationConfig `mapstructure:"authentication"`
Consumer

Brokers []string `mapstructure:"brokers"`
Topic string `mapstructure:"topic"`
InitialOffset int64
GroupID string `mapstructure:"group_id"`
ClientID string `mapstructure:"client_id"`
ProtocolVersion string `mapstructure:"protocol_version"`
RackID string `mapstructure:"rack_id"`
Brokers []string `mapstructure:"brokers"`
Topic string `mapstructure:"topic"`
InitialOffset int64
GroupID string `mapstructure:"group_id"`
ClientID string `mapstructure:"client_id"`
ProtocolVersion string `mapstructure:"protocol_version"`
RackID string `mapstructure:"rack_id"`
FetchMaxMessageBytes int32 `mapstructure:"fetch_max_message_bytes"`
}

// NewConsumer creates a new kafka consumer
@@ -57,6 +58,7 @@ func (c *Configuration) NewConsumer(logger *zap.Logger) (Consumer, error) {
saramaConfig.Group.Mode = cluster.ConsumerModePartitions
saramaConfig.ClientID = c.ClientID
saramaConfig.RackID = c.RackID
saramaConfig.Consumer.Fetch.Default = c.FetchMaxMessageBytes
if len(c.ProtocolVersion) > 0 {
ver, err := sarama.ParseKafkaVersion(c.ProtocolVersion)
if err != nil {
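On the sarama side, the new Configuration field is copied onto Consumer.Fetch.Default, the number of message bytes sarama requests from the broker per fetch; if this is smaller than typical messages, the consumer spends round trips negotiating sizes instead of consuming. A minimal sketch of that one setting in isolation follows; the 10MB figure is simply the value used in the flag test above, not a recommendation.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// The ingester copies FetchMaxMessageBytes onto Consumer.Fetch.Default,
	// so each fetch request asks the broker for roughly this many bytes per partition.
	cfg := sarama.NewConfig()
	cfg.Consumer.Fetch.Default = 10 * 1024 * 1024 // hypothetical 10MB, matching the test value above

	fmt.Println(cfg.Consumer.Fetch.Default)
}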