forked from fwang7/node-red-contrib-kafka-node
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathkafka.js
416 lines (390 loc) · 15 KB
/
kafka.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
/**
* Created by fwang1 on 3/25/15.
*/
const kafka = require('kafka-node');
const kafkaBatchRunner = require('./kafka-batch-runner');
const ConsistentHash = require('consistent-hash');
module.exports = function kafkaNodes(RED) {
/*
* Kafka Producer
* Parameters:
- topics
- zkquorum (example: zkquorum = "[host]:2181")
*/
/**
 * Kafka Producer node.
 * Config parameters:
 *   - topics: comma-separated list of topics to publish every message to
 *   - zkquorum: zookeeper quorum (e.g. "[host]:2181")
 *   - balance: partitioning strategy, one of 'actual' | 'rr' | 'id' | 'none'
 *   - partitions: optional comma-separated partition counts, one per topic
 *   - debug: truthy enables console logging
 *   - disabled: when set, every input is answered with { sent: false }
 */
function kafkaNode(config) {
  RED.nodes.createNode(this, config);
  const node = this;
  const disabled = config.disabled;
  if (disabled) {
    node.status({
      fill: 'red',
      shape: 'dot',
      text: 'Disabled',
    });
    node.on('input', (msg) => {
      node.send({
        payload: {
          sent: false,
          error: 'Node disabled',
          msg,
        },
      });
    });
    return;
  }
  // Set of pending retry timer ids so they can all be cleared on close.
  // add() is overridden to take (fn, timeout): it schedules fn and removes
  // the timer id from the set automatically once the timer fires.
  const retryTimeouts = new Set();
  const oldRetryTimeoutsAdd = retryTimeouts.add;
  retryTimeouts.add = (fn, timeout) => {
    const thisTimeOut = setTimeout(() => {
      retryTimeouts.delete(thisTimeOut);
      fn();
    }, timeout);
    oldRetryTimeoutsAdd.call(retryTimeouts, thisTimeOut);
  };
  const clusterZookeeper = config.zkquorum;
  const HighLevelProducer = kafka.HighLevelProducer;
  const Client = kafka.Client;
  const topics = config.topics;
  const debugOn = config.debug;
  const topicArray = topics.split(',');
  // Wrapper object so the client/producer pair can be swapped out on
  // reconnect while closures keep one stable reference.
  const clientWrapper = {};
  clientWrapper.client = new Client(clusterZookeeper);
  clientWrapper.producer = new HighLevelProducer(clientWrapper.client);
  clientWrapper.open = true;
  // Logs only when the node's debug option is on.
  function debug(...rest) {
    if (debugOn) {
      console.log(...rest); // eslint-disable-line no-console
    }
  }
  // Balance type: one of [
  //   actual: msg.partition is used as partition number for all messages
  //   rr: round-robin message distribution over all the partitions
  //   id: msg.hashId is used to derive a hash code, and that code is assigned
  //       as partition number; helpful if you want to make sure messages with
  //       one hashId go to the same partition. msg.hashId posted as key of message
  //   none: partitioning and everything else is left to the internal
  //       implementation; helpful if you only want a basic HighLevelProducer.
  //       msg.partition and msg.hashId as key are still posted with the message
  // ]
  const balance = config.balance;
  // Optional: comma-separated partition counts corresponding to each topic.
  const totalPartitions = config.partitions && config.partitions.split(',').map(val => parseInt(val, 10));
  // If partition counts are provided, there must be exactly one per topic.
  if (totalPartitions && totalPartitions.length !== topicArray.length) {
    return node.error('Total topics != Total partition counts \nPartition counts, if provided, should be for each topic, in comma-seperated format');
  }
  // A zookeeper metadata fetch is only needed when counts are not supplied
  // AND the balance strategy actually uses partition counts.
  const metadataFetchNotRequired = totalPartitions || balance === 'none' || balance === 'actual';
  // The function that assigns a partition to each message; built below once
  // metadata is available. Inputs arriving before then are rejected.
  node.balancer = null;
  // Partition counts per topic, keyed by topic name.
  const topicMetadata = {
  };
  // Per-topic producer state: previous round-robin partition number ('rr')
  // or a ConsistentHash instance ('id').
  const topicDict = {};
  node.on('close', () => {
    clientWrapper.client.close();
    clientWrapper.open = false;
    [...retryTimeouts].forEach(tid => clearTimeout(tid));
  });
  /**
   * Sends msg.payload to every configured topic.
   * @param partitionBuilder - function which creates partition number,
   *        invoked with (topic)
   * @param key - key assigned to message (see kafka-key)
   * @param msg - the msg object
   */
  function produce(partitionBuilder, key, msg) {
    const payloads = topicArray.map(topic => ({
      topic,
      partition: partitionBuilder(topic),
      key,
      messages: msg.payload,
    }));
    debug(`Sending Message: ${JSON.stringify(payloads)}`);
    clientWrapper.producer.send(payloads, (err, data) => { // eslint-disable-line consistent-return
      if (err) {
        return node.error(err);
      }
      debug(`Kafka success Response ${JSON.stringify(data)}`);
      node.send({
        payload: {
          sent: true,
          msg,
        },
      });
    });
  }
  /**
   * Builds topicMetadata (partition counts) for the balancer, either from the
   * node config or by querying zookeeper with retries and a periodic
   * connection recycle every 10th failed attempt.
   * @param callback - invoked with the list of topics that have metadata
   */
  function fetchMetadata(callback) {
    try {
      if (metadataFetchNotRequired) {
        // Partition counts supplied in config: no zookeeper round-trip needed.
        if (totalPartitions) {
          topicArray.forEach((topic, i) => {
            topicMetadata[topic] = { partitions: totalPartitions[i] || 1 };
          });
          node.status({
            fill: 'green',
            shape: 'dot',
            text: 'metadata loaded from config',
          });
          // FIX: pass the topic list — the 'id'/'rr' balancer builders iterate
          // over it and previously crashed on undefined when partition counts
          // came from config.
          return callback(topicArray);
        }
        // balance is 'none' or 'actual': the balancer ignores metadata.
        node.status({
          fill: 'green',
          shape: 'dot',
          text: 'metadata not required',
        });
        return callback();
      }
      node.status({
        fill: 'grey',
        shape: 'dot',
        text: `fetching metadata from ${clusterZookeeper}`,
      });
      const loadMetadataForTopics = function fetchMetaRetryWrapper() {
        if (!clientWrapper.open) {
          return;
        }
        this.retries++;
        const thisCtx = this;
        debug(`Trying metadata fetch, try no: ${thisCtx.retries}`);
        clientWrapper.client.loadMetadataForTopics(topicArray, (err, metadataResponse) => { // eslint-disable-line func-names
          if (err) {
            debug(`received Error, will retry, error: ${JSON.stringify(err, null, 2)}`);
            const topicsLeft = topicArray.filter(topic =>
              (Object.keys(topicMetadata).indexOf(topic) === -1));
            debug(`topics left are ${JSON.stringify(topicsLeft)}`);
            if (topicsLeft.length) {
              // Every 10th failed attempt, recycle the zookeeper connection
              // before the next retry.
              if (thisCtx.retries % 10 === 0) {
                clientWrapper.client.close((...rest) => {
                  debug('closed previous connention ', JSON.stringify(rest));
                  clientWrapper.open = false;
                  retryTimeouts.add(() => {
                    clientWrapper.client = new Client(clusterZookeeper);
                    clientWrapper.producer = new HighLevelProducer(clientWrapper.client);
                    clientWrapper.open = true;
                    loadMetadataForTopics();
                  }, 30000);
                });
              }
              retryTimeouts.add(loadMetadataForTopics, 2000);
              return;
            }
            // FIX: metadata is already complete for all topics; don't fall
            // through and dereference an undefined metadataResponse.
            return;
          }
          try {
            const metadata = metadataResponse[1].metadata;
            const localTopicMetaData = {};
            // Count one partition per (topic, partition) entry in the response.
            Object.keys(metadata).forEach((topicKey) => {
              const topicMetadataResponse = metadata[topicKey];
              Object.keys(topicMetadataResponse).forEach((key) => {
                const topic = topicMetadataResponse[key].topic;
                const topicData = localTopicMetaData[topic] = localTopicMetaData[topic] || { partitions: 0 };
                topicData.partitions++;
              });
            });
            if (Object.keys(localTopicMetaData).length) {
              Object.keys(localTopicMetaData).forEach((topic) => {
                topicMetadata[topic] = localTopicMetaData[topic];
              });
              debug('metadata recieved', JSON.stringify(metadataResponse, null, 2));
              node.status({
                fill: 'green',
                shape: 'dot',
                text: `fetched metadata for ${Object.keys(topicMetadata).join(', ')}`,
              });
              callback(Object.keys(topicMetadata));
              debug('metadata built', JSON.stringify(topicMetadata, null, 2));
            } else if (metadataResponse[1].error) {
              node.error(metadataResponse[1].error);
            }
          } catch (e) {
            debug('metadata received with error: ', JSON.stringify(metadataResponse, null, 2));
            return node.error(e);
          }
        });
      }.bind({ retries: 0 }); // FIX: retry counter was initialised as an array
      loadMetadataForTopics();
    } catch (e) {
      node.error(e);
    }
  }
  // Build node.balancer once metadata is available.
  fetchMetadata((topicsAdded) => {
    try {
      debug('balance type is %s', balance);
      switch (balance) {
        case 'id':
          // One consistent-hash ring per topic, with a node per partition.
          topicsAdded.forEach((topic) => {
            const hr = (topicDict[topic] = {
              hr: new ConsistentHash(),
            }).hr;
            for (let i = 0; i < topicMetadata[topic].partitions; i++) {
              hr.add(i);
            }
          });
          node.balancer = function idBalancer(msg) {
            // FIX: null/undefined check — falsy-but-valid ids such as 0 were
            // previously rejected.
            if (msg.hashId == null) {
              return node.error('msg.hashId is mandatory in case of consistent-hash');
            }
            const key = msg.hashId;
            produce(topicKey => topicDict[topicKey].hr.get(key), key, msg);
          };
          break;
        case 'actual':
          node.balancer = function actualBalance(msg) {
            // FIX: partition 0 is a valid Kafka partition; only reject
            // null/undefined (previously `!msg.partition` rejected 0).
            if (msg.partition == null) {
              return node.error('msg.partition is mandatory in case of partition');
            }
            produce(() => msg.partition, undefined, msg);
          };
          break;
        case 'rr':
          // Per-topic cyclic counter over the partition count.
          topicsAdded.forEach((topic) => {
            const partitionCount = topicMetadata[topic].partitions;
            topicDict[topic] = {
              value: -1,
              next() {
                return (this.value = (this.value + 1) % partitionCount);
              },
            };
          });
          node.balancer = function roundrobinBalancer(msg) {
            produce(topicKey => topicDict[topicKey].next(), undefined, msg);
          };
          break;
        default:
          // 'none': pass msg.partition/msg.hashId through untouched.
          node.balancer = function noneBalancer(msg) {
            produce(() => msg.partition, msg.hashId, msg);
          };
      }
      debug(`node.balancer is ${node.balancer}`);
    } catch (e) {
      return node.error(e);
    }
  });
  this.on('input', (msg) => {
    try {
      if (!node.balancer) {
        // Metadata fetch has not completed yet.
        node.send({
          payload: false,
        });
        return node.error('Producer Not Ready');
      }
      node.balancer(msg);
    } catch (e) {
      node.error(e);
    }
  });
}
// Register the producer under the palette type name 'kafka'.
RED.nodes.registerType('kafka', kafkaNode);
/*
* Kafka Consumer
* Parameters:
- topics
- groupId
- zkquorum (example: zkquorum = "[host]:2181")
*/
/**
 * Kafka Consumer node.
 * Config parameters:
 *   - topics: comma-separated list of topics to subscribe to
 *   - groupId: consumer group id
 *   - zkquorum: zookeeper quorum (e.g. "[host]:2181")
 *   - consumerType: 'high-level-consumer' uses kafka.HighLevelConsumer;
 *     anything else (default 'ConsumerGroup') delegates to kafkaBatchRunner
 *   - fetchMaxBytes, autoCommit: tuning options
 *   - debug: 'debug' enables message logging
 */
function kafkaInNode(config) {
  RED.nodes.createNode(this, config);
  const node = this;
  const consumerType = config.consumerType || 'ConsumerGroup';
  const fetchMaxBytes = parseInt(config.fetchMaxBytes, 10);
  const HighLevelConsumer = kafka.HighLevelConsumer;
  const Client = kafka.Client;
  let topics = String(config.topics);
  const clusterZookeeper = config.zkquorum;
  const groupId = config.groupId;
  const debug = (config.debug === 'debug');
  const cgTopics = topics.split(',');
  // HighLevelConsumer expects [{ topic }] objects; the batch runner takes
  // plain topic names (cgTopics).
  topics = cgTopics.map(topic => ({
    topic,
  }));
  if (consumerType === 'high-level-consumer') {
    const options = {
      groupId,
      autoCommit: config.autoCommit,
      autoCommitMsgCount: 10,
    };
    try {
      // FIX: only open a zookeeper client on the path that uses it — it was
      // previously created unconditionally and leaked on the ConsumerGroup path.
      const client = new Client(clusterZookeeper);
      const consumer = new HighLevelConsumer(client, topics, options);
      this.log('Consumer created...');
      this.status({
        fill: 'green',
        shape: 'dot',
        text: `connected to ${clusterZookeeper}`,
      });
      consumer.on('message', (message) => {
        if (debug) {
          console.log(message); // eslint-disable-line no-console
          node.log(message);
        }
        const msg = { payload: message };
        node.send(msg);
      });
      consumer.on('error', (err) => {
        node.error(err);
      });
      // FIX: release the consumer and its zookeeper connection when the node
      // is redeployed or the flow is stopped (previously leaked).
      node.on('close', (done) => {
        consumer.close(true, () => done());
      });
    } catch (e) {
      node.error(e);
    }
  } else {
    try {
      const options = {
        host: clusterZookeeper,
        zk: {
          sessionTimeout: 10000,
        }, // put client zk settings if you need them (see Client)
        batch: undefined, // put client batch settings if you need them (see Client)
        // ssl: true, // optional (defaults to false) or tls options hash
        groupId,
        autoCommit: false,
        sessionTimeout: 15000,
        // An array of partition assignment protocols ordered by preference.
        // 'roundrobin' or 'range' string for built-ins.
        protocol: ['roundrobin'],
        // Offsets to use for new groups; other options are 'earliest' or
        // 'none' (none emits an error if no offsets were saved).
        // Equivalent to the Java client's auto.offset.reset.
        fromOffset: 'earliest', // default
        // How to recover from OutOfRangeOffset error (saved offset is past
        // server retention); accepts the same values as fromOffset.
        outOfRangeOffset: 'earliest', // default
        migrateHLC: false, // for details please see Migration section below
        fetchMaxWaitMs: 100,
        fetchMinBytes: 1,
        fetchMaxBytes,
      };
      kafkaBatchRunner(node, options, cgTopics, {
        debug,
      });
    } catch (e) {
      node.error(e);
    }
  }
}
// Register the consumer under the palette type name 'kafka in'.
RED.nodes.registerType('kafka in', kafkaInNode);
};