Commit 52558dee authored by DeleMing

<dev>

1. mock
parent df164b1f
......@@ -68,10 +68,22 @@
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>1.1.1</version>
<artifactId>kafka-clients</artifactId>
<version>0.8.2.2</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>org.apache.kafka</groupId>-->
<!-- <artifactId>kafka_2.11</artifactId>-->
<!-- <version>0.8.2.1</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.kafka</groupId>-->
<!-- <artifactId>kafka_2.11</artifactId>-->
<!-- <version>1.1.1</version>-->
<!-- </dependency>-->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
......@@ -119,7 +131,7 @@
<!-- Prefix for classpath entries in the generated manifest; third-party jars go under lib/, so the prefix is lib/ -->
<classpathPrefix>lib/</classpathPrefix>
<!-- The application's main class -->
<mainClass>com.zorkdata.tools.oldmock.MockStreamxLogAvro</mainClass>
<mainClass>com.zorkdata.tools.mock.MockLogMergeData</mainClass>
</manifest>
</archive>
<!-- Exclude files that should not be packaged into the jar -->
......
......@@ -2,7 +2,9 @@ package com.zorkdata.tools.kafka;
import com.zorkdata.tools.oldkafka.AvroSerializerFactory;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -33,7 +35,6 @@ public class CommonProducer {
initConfig();
Properties props = new Properties();
props.put("bootstrap.servers", kafkaServer);
// props.put("client.id", "webAPI4LogGather"); 不自定义client.id,使用自增
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
......
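For context, a minimal sketch of constructing and using a producer with the props above; the broker address, topic name, and payload are illustrative, not the project's actual send path:

// Sketch only: mirrors the props built in CommonProducer above.
Properties props = new Properties();
props.put("bootstrap.servers", "kafka-1:9092");
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
KafkaProducer<String, byte[]> producer = new KafkaProducer<>(props);
producer.send(new ProducerRecord<String, byte[]>("log_test", "{\"k\":\"v\"}".getBytes()));
producer.close();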
......@@ -14,6 +14,7 @@ public class CommonProducerPool implements Closeable {
private int threadNum = 15;
/**
* Round-robin id
*/
......@@ -22,6 +23,7 @@ public class CommonProducerPool implements Closeable {
private static CommonProducerPool producerInstance;
public static CommonProducerPool getInstance() {
if (producerInstance == null) {
producerInstance = new CommonProducerPool();
}
......
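The lazy getInstance() above is not thread-safe under concurrent first calls; a sketch of the initialization-on-demand holder idiom as one alternative (only the names from the hunk are reused, the rest is illustrative):

import java.io.Closeable;

// Sketch: the JVM initializes Holder exactly once, making lazy init
// thread-safe without explicit synchronization.
public class CommonProducerPool implements Closeable {
    private CommonProducerPool() {
    }

    private static class Holder {
        static final CommonProducerPool INSTANCE = new CommonProducerPool();
    }

    public static CommonProducerPool getInstance() {
        return Holder.INSTANCE;
    }

    @Override
    public void close() {
        // release pooled producers here
    }
}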
......@@ -15,16 +15,16 @@ public class Config {
private static final Logger logger = LoggerFactory.getLogger(Config.class);
private String kafkaServers;
protected int kafkaBathSize;
protected String topicName;
public String kafkaServers;
public int kafkaBathSize;
public String topicName;
public static final Config INSTANCE = new Config();
/**
* Reads the configuration file
*/
private Config() {
public Config() {
try {
Properties properties = PropertiesUtil.getProperties("/config.properties");
kafkaServers = properties.getProperty("kafka.servers");
......
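Making the constructor public while a static INSTANCE remains drops the singleton guarantee (callers can now do new Config(), as MockLogMergeData's main does). If that guarantee matters, a sketch that preserves it; PropertiesUtil and the property keys come from the code above, while the batch-size parsing and default are assumptions:

// Sketch: keep the constructor private and route all access through INSTANCE.
public final class Config {
    public static final Config INSTANCE = new Config();
    public String kafkaServers;
    public int kafkaBathSize;
    public String topicName;

    private Config() {
        try {
            java.util.Properties properties = PropertiesUtil.getProperties("/config.properties");
            kafkaServers = properties.getProperty("kafka.servers");
            // parsing and the default value below are assumed, not from the original
            kafkaBathSize = Integer.parseInt(properties.getProperty("kafka.batch.size", "100"));
            topicName = properties.getProperty("kafka.topic.name");
        } catch (Exception e) {
            throw new IllegalStateException("failed to load /config.properties", e);
        }
    }
}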
package com.zorkdata.tools.mock;
import org.joda.time.DateTime;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
* @author 谢森
* @Description Date and time utility class
* @Email xiesen@zork.com.cn
*/
public class DateUtil {
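// Note: unlike the ThreadLocal-wrapped formatters below, this shared DateFormat is not thread-safe.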
private static DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS+08:00");
private static ThreadLocal<SimpleDateFormat> sdf = new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return new SimpleDateFormat("yyyy.MM.dd");
}
};
private static ThreadLocal<SimpleDateFormat> utcSdf = new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
}
};
private static ThreadLocal<SimpleDateFormat> utcSdf1 = new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");
}
};
private static ThreadLocal<SimpleDateFormat> utcSdf2 = new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
}
};
private static ThreadLocal<SimpleDateFormat> utcSdf3 = new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSSSS");
}
};
public static Long timestamp(String timestamp) {
return new DateTime(timestamp).toDate().getTime();
}
public static String format(String timestamp) throws ParseException {
return sdf.get().format(new DateTime(timestamp).toDate());
}
public static Long utcDate2Timestamp(String utcDateStr) throws ParseException {
return utcSdf1.get().parse(utcDateStr).getTime();
}
public static Long utcDate2Timestamp2(String utcDateStr) throws ParseException {
return utcSdf2.get().parse(utcDateStr).getTime();
}
public static Long utcDate2Timestamp3(String utcDateStr) throws ParseException {
return utcSdf3.get().parse(utcDateStr).getTime();
}
public static Long utcDate2Timestamp1(String utcDateStr) {
try {
return utcSdf.get().parse(utcDateStr).getTime();
} catch (ParseException e) {
e.printStackTrace();
}
return 0L;
}
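// Note: despite the "UTC" names, the pattern in `format` appends a literal +08:00 offset rather than true UTC.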
public static String getUTCTimeStr() {
return format.format(new Date());
}
public static String getUTCTimeStr(long interval) {
long currentTimeMillis = System.currentTimeMillis();
return format.format(new Date(currentTimeMillis + interval));
}
}
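A few illustrative calls against DateUtil (all input values are made up):

// Demo only; assumes DateUtil is on the classpath.
public class DateUtilDemo {
    public static void main(String[] args) throws java.text.ParseException {
        System.out.println(DateUtil.timestamp("2020-09-28T19:20:33.876+08:00"));    // epoch millis (Joda parse)
        System.out.println(DateUtil.format("2020-09-28T19:20:33.876+08:00"));       // "2020.09.28"
        System.out.println(DateUtil.utcDate2Timestamp2("2020-09-28 19:20:33.876")); // epoch millis
        System.out.println(DateUtil.getUTCTimeStr());                               // now, with +08:00 suffix
    }
}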
......@@ -3,8 +3,6 @@ package com.zorkdata.tools.mock;
import com.alibaba.fastjson.JSONObject;
import com.zorkdata.tools.kafka.CommonProducer;
import com.zorkdata.tools.kafka.CommonProducerPool;
import com.zorkdata.tools.oldkafka.Producer;
import com.zorkdata.tools.oldkafka.ProducerPool;
import java.time.Instant;
import java.util.Random;
......@@ -72,9 +70,9 @@ public class MockFilebeatData {
long size = 10;
for (int i = 0; i < size; i++) {
String json = buildMsg();
CommonProducer producer = CommonProducerPool.getInstance().getProducer();
producer.sendJson(topicName, json);
System.out.println(json);
// CommonProducer producer = CommonProducerPool.getInstance().getProducer();
// producer.sendJson(topicName, json);
}
Thread.sleep(1000);
}
......
package com.zorkdata.tools.mock;
import com.zorkdata.tools.kafka.CommonProducer;
import com.zorkdata.tools.kafka.CommonProducerPool;
import com.zorkdata.tools.kafka.Config;
import java.text.SimpleDateFormat;
import java.util.*;
/**
* @author: LiaoMingtao
* @date: 2020/9/29
*/
public class MockLogMergeData {
private static String req = "{\n" +
"\t\"@timestamp\": \"2020-09-28T19:20:33.876Z\",\n" +
"\t\"@metadata\": {\n" +
"\t\t\"beat\": \"filebeat\",\n" +
"\t\t\"type\": \"doc\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"clustername\": \"上海灾备\",\n" +
"\t\"offset\": 1290600,\n" +
"\t\"message\": \"%s msg : AgwMsgCustLoginReq:<AgwMsgCustLogoutReq:<time=20200917084102005|client_seq_id=%s|client_feature_code=\\\"PC;IIP=192.1.201.1;IPORT=50002;LIP=192.1.201.1;MAC=FCAA149B2CFA;HD=S25TNSAG500643R;PCN=CRANBERRY;CPU=C3060300FFFBEBBF;PI=SDA2^EXT4^454G@SHUNCHU;V1.0.0\\\"|cust_id=\\\" \\\"|fund_account_id=\\\"20053866 \\\"|branch_id=\\\"5101 5101 \\\"|account_id=\\\" \\\"|user_info=\\\" \\\"|>order_way=7|password=\\\"8j46.?#w\\\"|pwd_type=1|>\",\n" +
"\t\"source\": \"/var/log/1000000.log\",\n" +
"\t\"beat\": {\n" +
"\t\t\"name\": \"zorkdata-66\",\n" +
"\t\t\"hostname\": \"zorkdata-66\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"log\": {\n" +
"\t\t\"file\": {\n" +
"\t\t\t\"path\": \"/var/log/1000000.log\"\n" +
"\t\t}\n" +
"\t},\n" +
"\t\"collectRuleId\": 235,\n" +
"\t\"input\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"appsystem\": \"ZJCJCS\",\n" +
"\t\"topicName\": \"log_test\",\n" +
"\t\"appprogramname\": \"上海灾备\",\n" +
"\t\"prospector\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"host\": {\n" +
"\t\t\"name\": \"zorkdata-66\"\n" +
"\t},\n" +
"\t\"servicename\": \"核心1KCXP\",\n" +
"\t\"servicecode\": \"核心1KCXP\",\n" +
"\t\"ip\": \"192.168.2.66\"\n" +
"}";
private static String resp = "{\n" +
"\t\"@timestamp\": \"2020-09-28T19:20:34.176Z\",\n" +
"\t\"@metadata\": {\n" +
"\t\t\"beat\": \"filebeat\",\n" +
"\t\t\"type\": \"doc\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"clustername\": \"上海灾备\",\n" +
"\t\"offset\": 1290600,\n" +
"\t\"message\": \"%s msg : AgwMsgCustLoginResp:<client_seq_id=%s|transact_time=20200917084101188|cust_id=\\\"5782439 \\\"|cust_name=\\\"绠 \\\"|permisson_error_code=0|AgwMsgCustLoginRespFundAccountUnit:<fund_account_id=\\\"20035870 \\\"|branch_id=\\\"5101 5101 \\\"|fund_account_attr=0|AgwMsgCustLoginRespAccountUnit:<account_id=\\\"0157384108 \\\"|market_id=102|>AgwMsgCustLoginRespAccountUnit:<account_id=\\\"A498147845 \\\"|market_id=101|>>AgwMsgCustLoginRespFundAccountUnit:<fund_account_id=\\\"20053866 \\\"|branch_id=\\\"5101 5101 \\\"|fund_account_attr=0|AgwMsgCustLoginRespAccountUnit:<account_id=\\\"0604441070 \\\"|market_id=102|>AgwMsgCustLoginRespAccountUnit:<account_id=\\\"E044418889 \\\"|market_id=101|>>user_info=\\\" \\\"|pwd_err_time=0|bank_code=0|pwd_ciphertext=\\\"8j46.?#w\\\"|pwd_type=1|>\",\n" +
"\t\"source\": \"/var/log/1000000.log\",\n" +
"\t\"beat\": {\n" +
"\t\t\"name\": \"zorkdata-66\",\n" +
"\t\t\"hostname\": \"zorkdata-66\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"log\": {\n" +
"\t\t\"file\": {\n" +
"\t\t\t\"path\": \"/var/log/1000000.log\"\n" +
"\t\t}\n" +
"\t},\n" +
"\t\"collectRuleId\": 235,\n" +
"\t\"input\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"appsystem\": \"ZJCJCS\",\n" +
"\t\"topicName\": \"log_test\",\n" +
"\t\"appprogramname\": \"上海灾备\",\n" +
"\t\"prospector\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"host\": {\n" +
"\t\t\"name\": \"zorkdata-66\"\n" +
"\t},\n" +
"\t\"servicename\": \"核心1KCXP\",\n" +
"\t\"servicecode\": \"核心1KCXP\",\n" +
"\t\"ip\": \"192.168.2.66\"\n" +
"}";
private static String req1 = "{\n" +
"\t\"@timestamp\": \"2020-09-28T19:20:33.876Z\",\n" +
"\t\"@metadata\": {\n" +
"\t\t\"beat\": \"filebeat\",\n" +
"\t\t\"type\": \"doc\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"clustername\": \"上海灾备\",\n" +
"\t\"offset\": 1290600,\n" +
"\t\"message\": \"req\",\n" +
"\t\"beat\": {\n" +
"\t\t\"name\": \"zorkdata-66\",\n" +
"\t\t\"hostname\": \"zorkdata-66\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"log\": {\n" +
"\t\t\"file\": {\n" +
"\t\t\t\"path\": \"/var/log/1000000.log\"\n" +
"\t\t}\n" +
"\t},\n" +
"\t\"collectRuleId\": 235,\n" +
"\t\"input\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"appsystem\": \"ZJCJCS\",\n" +
"\t\"topicName\": \"log_test\",\n" +
"\t\"appprogramname\": \"上海灾备\",\n" +
"\t\"prospector\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"host\": {\n" +
"\t\t\"name\": \"zorkdata-66\"\n" +
"\t},\n" +
"\t\"servicename\": \"核心1KCXP\",\n" +
"\t\"servicecode\": \"核心1KCXP\",\n" +
"\t\"ip\": \"192.168.2.66\"\n" +
"}";
private static String resp1 = "{\n" +
"\t\"@timestamp\": \"2020-09-28T19:20:33.876Z\",\n" +
"\t\"@metadata\": {\n" +
"\t\t\"beat\": \"filebeat\",\n" +
"\t\t\"type\": \"doc\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"clustername\": \"上海灾备\",\n" +
"\t\"offset\": 1290600,\n" +
"\t\"message\": \"resp\",\n" +
"\t\"beat\": {\n" +
"\t\t\"name\": \"zorkdata-66\",\n" +
"\t\t\"hostname\": \"zorkdata-66\",\n" +
"\t\t\"version\": \"6.8.1\"\n" +
"\t},\n" +
"\t\"log\": {\n" +
"\t\t\"file\": {\n" +
"\t\t\t\"path\": \"/var/log/1000000.log\"\n" +
"\t\t}\n" +
"\t},\n" +
"\t\"collectRuleId\": 235,\n" +
"\t\"input\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"appsystem\": \"ZJCJCS\",\n" +
"\t\"topicName\": \"log_test\",\n" +
"\t\"appprogramname\": \"上海灾备\",\n" +
"\t\"prospector\": {\n" +
"\t\t\"type\": \"log\"\n" +
"\t},\n" +
"\t\"host\": {\n" +
"\t\t\"name\": \"zorkdata-66\"\n" +
"\t},\n" +
"\t\"servicename\": \"核心1KCXP\",\n" +
"\t\"servicecode\": \"核心1KCXP\",\n" +
"\t\"ip\": \"192.168.2.66\"\n" +
"}";
public static int getSleepTime() {
Random rand = new Random();
return rand.nextInt(100) + 1;
}
public static List<String> aaa(int size) throws InterruptedException {
List<String> aaa = new ArrayList<>();
for (int i = 0; i < size; i++) {
UUID uuid = UUID.randomUUID();
Date date1 = new Date();
SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSSSS");
String date1String = simpleDateFormat.format(date1);
Thread.sleep(getSleepTime());
String reqString = String.format(req, date1String, uuid.toString());
Date date2 = new Date();
String date2String = simpleDateFormat.format(date2);
String respString = String.format(resp, date2String, uuid.toString());
aaa.add(reqString);
aaa.add(respString);
}
return aaa;
}
// public static void main(String[] args) throws InterruptedException {
// int size = 1000;
// String topicName = "guotai";
// List<String> aaa = aaa(size);
// for (String json: aaa) {
// CommonProducer producer = CommonProducerPool.getInstance().getProducer();
// producer.sendJson(topicName, json);
// }
// }
public static void main(String[] args) throws InterruptedException {
int size = 1000;
Config config = new Config();
String topicName = config.getTopicName();
for (int i = 0; i < size; i++) {
UUID uuid = UUID.randomUUID();
Date date1 = new Date();
SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSSSS");
String date1String = simpleDateFormat.format(date1);
Thread.sleep(getSleepTime());
String reqString = String.format(req, date1String, uuid.toString());
Date date2 = new Date();
String date2String = simpleDateFormat.format(date2);
String respString = String.format(resp, date2String, uuid.toString());
// System.out.printf(reqString);
// System.out.printf(respString);
CommonProducer producer = CommonProducerPool.getInstance().getProducer();
producer.sendJson(topicName, reqString);
producer.sendJson(topicName, respString);
}
}
// public static void main(String[] args) throws InterruptedException {
// int size = 1000;
// String topicName = "guotai";
// Boolean a = null;
// if (a) {
// String newValue = topicName.replaceAll(":", "").replaceAll(":", "");
// }
// for (int i = 0; i < size; i++) {
// UUID uuid = UUID.randomUUID();
// Date date1 = new Date();
// SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSSSS");
// String date1String = simpleDateFormat.format(date1);
// // Thread.sleep(getSleepTime());
// // String reqString = String.format(req, date1String, uuid.toString());
// String reqString = req1;
// Date date2 = new Date();
// String date2String = simpleDateFormat.format(date2);
// // String respString = String.format(resp, date2String, uuid.toString());
// String respString = resp1;
// // System.out.printf(reqString);
// // System.out.printf(respString);
// CommonProducer producer = CommonProducerPool.getInstance().getProducer();
// producer.sendJson(topicName, reqString);
// producer.sendJson(topicName, respString);
//
// }
// }
}
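Each iteration of the mock emits a request and a response sharing one client_seq_id; a sketch of regrouping those pairs on the consuming side (class name and regex are illustrative):

import java.util.*;
import java.util.regex.*;

// Sketch: group mock messages by client_seq_id to recover req/resp pairs.
public class PairMatcher {
    private static final Pattern SEQ = Pattern.compile("client_seq_id=([0-9a-f-]+)");

    public static Map<String, List<String>> pair(List<String> messages) {
        Map<String, List<String>> byId = new HashMap<>();
        for (String msg : messages) {
            Matcher m = SEQ.matcher(msg);
            if (m.find()) {
                byId.computeIfAbsent(m.group(1), k -> new ArrayList<>()).add(msg);
            }
        }
        return byId; // each value should hold one req and one resp
    }
}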
package com.zorkdata.tools.mock;
import com.zorkdata.tools.kafka.CommonProducer;
import com.zorkdata.tools.kafka.CommonProducerPool;
import com.zorkdata.tools.utils.DateUtil;
import org.mortbay.util.ajax.JSON;
import java.util.HashMap;
import java.util.Map;
......@@ -59,9 +58,19 @@ public class MockStreamxLogAvro {
Map<String, String> dimensions = getRandomDimensions();
Map<String, Double> measures = new HashMap<>(1);
Map<String, String> normalFields = getRandomNormalFields();
CommonProducer producer = CommonProducerPool.getInstance().getProducer();
producer.sendLogAvro(topicName, logTypeName, timestamp, source, offset, dimensions,
measures, normalFields);
Map<String, Object> map = new HashMap<>();
map.put("logTypeName", logTypeName);
map.put("timestamp", timestamp);
map.put("source", source);
map.put("offset", offset);
map.put("dimensions", dimensions);
map.put("measures", measures);
map.put("normalFields", normalFields);
System.out.println(JSON.toString(map).getBytes().length);
// CommonProducer producer = CommonProducerPool.getInstance().getProducer();
// producer.sendLogAvro(topicName, logTypeName, timestamp, source, offset, dimensions,
// measures, normalFields);
}
Thread.sleep(1000);
}
......
......@@ -65,7 +65,7 @@ public class CommonProducer {
/**
* When the producer sends a message to the broker, the broker must return a result within the time this parameter controls; the default is 30s
*/
props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
// props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
/**
* Size of the producer-side buffer, in bytes; the default is 33554432, i.e. 32 MB
......
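For reference, a sketch of setting both knobs explicitly via the ProducerConfig constants already imported above (the values mirror the stated defaults):

// Sketch only: 30s request timeout and 32 MB buffer are the defaults described in the comments.
props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30000);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432L);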
package com.zorkdata.tools.utils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
* @author DeleMing
*/
public class JConsumerMutil {
private final static Logger log = LoggerFactory.getLogger(JConsumerMutil.class);
private final KafkaConsumer<String, String> consumer;
private ExecutorService executorService;
public JConsumerMutil() {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "ke1");
// Enable auto offset commit
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
// Auto-commit interval (ms)
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
consumer = new KafkaConsumer<String, String>(props);
consumer.subscribe(Arrays.asList("test_kafka_game_x"));
}
public void execute() {
// Initialize the thread pool
executorService = Executors.newFixedThreadPool(1);
while (true) {
ConsumerRecords<String, String> records = consumer.poll(100);
// poll() never returns null; only hand off non-empty batches
if (!records.isEmpty()) {
executorService.submit(new KafkaConsumerThread(records, consumer));
}
}
}
public void shutdown() {
try {
if (consumer != null) {
consumer.close();
}
if (executorService != null) {
executorService.shutdown();
// only await termination when a pool was actually created
if (!executorService.awaitTermination(10, TimeUnit.SECONDS)) {
log.error("shutdown kafka consumer thread timeout.");
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
/**
* Consumer worker thread
*/
class KafkaConsumerThread implements Runnable {
private ConsumerRecords<String, String> records;
public KafkaConsumerThread(ConsumerRecords<String, String> records,
KafkaConsumer<String, String> consumer) {
this.records = records;
}
@Override
public void run() {
for (TopicPartition partition : records.partitions()) {
// Fetch this partition's batch of records
List<ConsumerRecord<String, String>> partitionRecords = this.records.records(partition);
log.info("Thread Id : " + Thread.currentThread().getId());
for (ConsumerRecord<String, String> record : partitionRecords) {
System.out.println("offset =" + record.offset() + ", key=" + record.key() + ", value=" + record.value());
}
}
}
}
public static void main(String[] args) {
JConsumerMutil consumer = new JConsumerMutil();
try {
consumer.execute();
} catch (Exception e) {
log.error("mutil consumer from kafka has error , msg is " + e.getMessage());
consumer.shutdown();
}
}
}
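With enable.auto.commit=true, offsets can be committed before a worker thread finishes, so records may be lost on a crash. A sketch of per-partition manual commit instead (requires enable.auto.commit=false; API as in later kafka-clients versions, class name illustrative):

import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

// Sketch: with auto-commit disabled, commit each partition's offset after processing.
class ManualCommitSketch {
    static void process(KafkaConsumer<String, String> consumer, ConsumerRecords<String, String> records) {
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println("offset=" + record.offset() + ", value=" + record.value());
            }
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}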
#kafka.servers=kafka01:9092
#kafka.servers=kafka1:9092,kafka2:9092,kafka3:9092
kafka.servers=kafka-1:9092,kafka-2:9092,kafka-3:9092
#kafka.servers=kafka-1:19092,kafka-2:19092,kafka-3:19092
#kafka.batch.size=100000
#kafka.topic.name=a
kafka.batch.size=1
kafka.topic.name=guotai
key.serializer=org.apache.kafka.common.serialization.StringSerializer
value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
#value.serializer=org.apache.kafka.common.serialization.StringSerializer
......