
1.1 Kafka Setup


Kafka Deployment

Lab environment

OS           Memory   CPU
CentOS 7.6   2G       2

Software: kafka_2.11-1.1.1, zookeeper-3.4.14, jdk-8u221

 

Aliyun cloud servers

39.99.224.205 kafka01

39.99.132.23 kafka02

39.101.130.158 kafka03

# Append hosts entries

[root@kafka01 ~]# cat <<END >>/etc/hosts
> 39.99.224.205 kafka01
> 39.99.132.23 kafka02
> 39.101.130.158 kafka03
> END
[root@kafka02 ~]# cat <<END >>/etc/hosts
> 39.99.224.205 kafka01
> 39.99.132.23 kafka02
> 39.101.130.158 kafka03
> END
[root@kafka03 ~]# cat <<END >>/etc/hosts
> 39.99.224.205 kafka01
> 39.99.132.23 kafka02
> 39.101.130.158 kafka03
> END
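
A quick sanity check (my addition, not in the original transcript) that every node can resolve and reach its peers:

# each hostname should answer one ping; run this on every node
for h in kafka01 kafka02 kafka03; do
    ping -c 1 "$h" >/dev/null && echo "$h ok" || echo "$h unreachable"
done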

# Generate and distribute SSH keys

[root@kafka01 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:FApgI9Ew+XwjO+KXRjGDLoQ/c4sQF7lbWqZxnRT6Gjo root@kafka01
The key's randomart image is:
+---[RSA 2048]----+
|==+o. ...        |
|.++. o.. .       |
|.o.o.o...        |
|ooB+B.o.         |
|o+ &=.. S        |
|+.Xo.o           |
|ooE*o.           |
| ..=.            |
|  o              |
+----[SHA256]-----+
[root@kafka01 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka02
[root@kafka01 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka03
[root@kafka01 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka01

# Generate the key on kafka02

[root@kafka02 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:CjjZVU5cs/3T1hFkhEyULdBxCaxTdy/momOAgg0P2Xs root@kafka02
The key's randomart image is:
+---[RSA 2048]----+
|       .o.o.B=O*.|
|       +.  + Oo+o|
|   o  . . . + o.o|
|  ++..     o .o.+|
|  +*o. .S   .oo.+|
|  ..=.E..   . .o |
|     o.  . . .   |
|          +      |
|         . .     |
+----[SHA256]-----+
[root@kafka02 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka01
[root@kafka02 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka02
[root@kafka02 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka03

# Generate the key on kafka03

[root@kafka03 ~]# ssh-keygen -t rsa
(output omitted; same procedure as above)
[root@kafka03 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka01
[root@kafka03 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka02
[root@kafka03 ~]# ssh-copy-id -i .ssh/id_rsa.pub kafka03
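
With the keys distributed, a loop like this (my addition) confirms passwordless SSH works from each node; BatchMode makes any leftover password prompt fail loudly instead of hanging:

for h in kafka01 kafka02 kafka03; do
    ssh -o BatchMode=yes "$h" hostname   # should print the remote hostname, no prompt
done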

# Distribute the packages to the other nodes

[root@kafka01 ~]# scp zookeeper-3.4.14.tar.gz kafka_2.11-1.1.1.tgz jdk-8u221-linux-x64.tar.gz kafka02:

[root@kafka01 ~]# scp zookeeper-3.4.14.tar.gz kafka_2.11-1.1.1.tgz jdk-8u221-linux-x64.tar.gz kafka03:
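
A sketch (not in the original) to confirm all three tarballs landed in each root home directory:

for h in kafka02 kafka03; do
    ssh "$h" 'ls -lh zookeeper-3.4.14.tar.gz kafka_2.11-1.1.1.tgz jdk-8u221-linux-x64.tar.gz'
done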

# Extract the JDK

[root@kafka01 ~]# tar xf jdk-8u221-linux-x64.tar.gz -C /usr/local/
[root@kafka01 ~]# cd /usr/local/
[root@kafka01 local]# ls
aegis  etc    include       lib    libexec  share  zookeeper
bin    games  jdk1.8.0_221  lib64  sbin     src
[root@kafka01 local]# mv jdk1.8.0_221 java
[root@kafka01 local]# vim /etc/profile.d/java.sh
[root@kafka01 local]# cat /etc/profile.d/java.sh
export JAVA_HOME=/usr/local/java
export PATH=$PATH:$JAVA_HOME/bin
[root@kafka01 ~]# . /etc/profile.d/java.sh
[root@kafka01 ~]# java -version
java version "1.8.0_221"
Java(TM) SE Runtime Environment (build 1.8.0_221-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.221-b11, mixed mode)

 

Repeat the same JDK steps on kafka02 and kafka03; a scripted version follows.
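
A sketch of my own, assuming the JDK tarball already sits in each root home directory:

for h in kafka02 kafka03; do
    ssh "$h" 'tar xf jdk-8u221-linux-x64.tar.gz -C /usr/local/ &&
        mv /usr/local/jdk1.8.0_221 /usr/local/java &&
        printf "export JAVA_HOME=/usr/local/java\nexport PATH=\$PATH:\$JAVA_HOME/bin\n" > /etc/profile.d/java.sh'
done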

# Extract ZooKeeper

[root@kafka01 ~]# tar xf zookeeper-3.4.14.tar.gz -C /usr/local/
[root@kafka01 ~]# cd /usr/local/
[root@kafka01 local]# ls
aegis  bin  etc  games  include  lib  lib64  libexec  sbin  share  src  zookeeper-3.4.14
[root@kafka01 local]# mv zookeeper-3.4.14 zookeeper
[root@kafka01 local]# cd zookeeper/conf/
[root@kafka01 conf]# ls
configuration.xsl  log4j.properties  zoo_sample.cfg
[root@kafka01 conf]# cp zoo_sample.cfg zoo.cfg
[root@kafka01 conf]# vim zoo.cfg    # set dataDir and append the server entries shown below
[root@kafka01 conf]# cat zoo.cfg

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/usr/local/zookeeper/date
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=kafka01:2888:3888
server.2=kafka02:2888:3888
server.3=kafka03:2888:3888
quorumListenOnAllIPs=true

 

Each server.N entry must match the number written to myid inside the dataDir on that host.

[root@kafka01 zookeeper]# mkdir date
[root@kafka01 zookeeper]# echo "1" > /usr/local/zookeeper/date/myid

# Same steps on kafka02

[root@kafka02 ~]# tar xf zookeeper-3.4.14.tar.gz -C /usr/local/
[root@kafka02 ~]# cd /usr/local/
[root@kafka02 local]# mv zookeeper-3.4.14 zookeeper
[root@kafka02 local]# cd zookeeper/conf/
[root@kafka02 conf]# cp zoo_sample.cfg zoo.cfg
[root@kafka02 conf]# vim zoo.cfg    # same contents as on kafka01
[root@kafka02 zookeeper]# mkdir date
[root@kafka02 zookeeper]# echo "2" > /usr/local/zookeeper/date/myid

# Same steps on kafka03 (earlier commands omitted)

[root@kafka03 zookeeper]# mkdir date
[root@kafka03 zookeeper]# echo "3" > /usr/local/zookeeper/date/myid

# Start ZooKeeper

# kafka01
[root@kafka01 zookeeper]# ./bin/zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
# kafka02
[root@kafka02 zookeeper]# ./bin/zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
# kafka03
[root@kafka03 zookeeper]# ./bin/zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

# Check status

[root@kafka01 zookeeper]# ./bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: follower

[root@kafka02 zookeeper]# ./bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: follower

[root@kafka03 zookeeper]# ./bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: leader
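
ZooKeeper 3.4 also answers the four-letter-word commands out of the box (assuming no 4lw whitelist has been configured), so a quick liveness probe (my addition) can go over nc:

for h in kafka01 kafka02 kafka03; do
    echo -n "$h: "; echo ruok | nc "$h" 2181; echo    # a healthy server replies "imok"
done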

# Add ZooKeeper's bin directory to the global PATH

[root@kafka01 zookeeper]# echo "PATH=$PATH:/usr/local/zookeeper/bin/" >>/etc/profile

[root@kafka02 zookeeper]# echo "PATH=$PATH:/usr/local/zookeeper/bin/" >>/etc/profile

[root@kafka03 zookeeper]# echo "PATH=$PATH:/usr/local/zookeeper/bin/" >>/etc/profile

Deploy Kafka

[root@kafka01 ~]# tar xf kafka_2.11-1.1.1.tgz -C /usr/local/
[root@kafka01 ~]# cd /usr/local/
[root@kafka01 local]# ls
aegis  etc    include  kafka_2.11-1.1.1  lib64    sbin   src
bin    games  java     lib               libexec  share  zookeeper
[root@kafka01 local]# mv kafka_2.11-1.1.1 kafka
[root@kafka01 local]# cd kafka/
[root@kafka01 kafka]# ls
bin  config  libs  LICENSE  NOTICE  site-docs
[root@kafka01 kafka]# cd config/
[root@kafka01 config]# ls
connect-console-sink.properties    consumer.properties
connect-console-source.properties  log4j.properties
connect-distributed.properties     producer.properties
connect-file-sink.properties       server.properties
connect-file-source.properties     tools-log4j.properties
connect-log4j.properties           zookeeper.properties
connect-standalone.properties
[root@kafka01 config]# cp server.properties{,.bak}
[root@kafka01 config]# vim server.properties

 

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
delete.topic.enable=true

############################# Socket Server Settings #############################

# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
#   FORMAT:
#     listeners = listener_name://host_name:port
#   EXAMPLE:
#     listeners = PLAINTEXT://your.host.name:9092
listeners=PLAINTEXT://:9092

# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured.  Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
#advertised.listeners=PLAINTEXT://your.host.name:9092

# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600

############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/u01/data/kafka

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

############################# Internal Topic Settings  #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=kafka01:2181,kafka02:2181,kafka03:2181

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000

############################# Group Coordinator Settings #############################

# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0

 

The extraction steps on kafka02 and kafka03 are the same as above.

 

[root@kafka01 config]# scp server.properties kafka02:/usr/local/kafka/config/server.properties
server.properties                          100% 6899     4.7MB/s   00:00
[root@kafka01 config]# scp server.properties kafka03:/usr/local/kafka/config/
server.properties                          100% 6899     5.2MB/s   00:00

# Edit the copied file on kafka02 and kafka03 (a scripted alternative follows)

[root@kafka02 config]# vim server.properties

broker.id=2

 

[root@kafka03 config]# vim server.properties

broker.id=3
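
Equivalently, the broker.id edits can be scripted from kafka01; a sketch of my own, not in the original:

for i in 2 3; do
    ssh "kafka0$i" "sed -i 's/^broker.id=.*/broker.id=$i/' /usr/local/kafka/config/server.properties"
done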

 

 

# Add Kafka's bin directory to the PATH

[root@kafka01 ~]# echo "PATH=$PATH:/usr/local/kafka/bin/" >>/etc/profile
[root@kafka01 ~]# . /etc/profile

 

Same steps on kafka02 and kafka03.

# Start Kafka

[root@kafka01 ~]# kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
[root@kafka02 ~]# kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
[root@kafka03 ~]# kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
[root@kafka01 ~]# jps
11923 QuorumPeerMain
14325 Jps
14251 Kafka
[root@kafka02 ~]# jps
14594 Jps
11814 QuorumPeerMain
14524 Kafka
[root@kafka03 ~]# jps
12177 QuorumPeerMain
13106 Kafka
13461 Jps
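
If all three brokers came up, their ids should now be registered in ZooKeeper. One way to check (my addition, using the zookeeper-shell.sh bundled with Kafka):

[root@kafka01 ~]# zookeeper-shell.sh kafka01:2181 ls /brokers/ids
# the last line of output should list the configured broker ids: [0, 2, 3]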

Notes

1) Producer: a message producer, i.e., a client that sends messages to Kafka brokers.
2) Consumer: a message consumer, i.e., a client that fetches messages from Kafka brokers.
3) Consumer Group (CG): a group made up of multiple consumers. Each consumer in the group consumes a different subset of partitions; within a group, a partition can be consumed by only one consumer, and groups do not affect one another. Every consumer belongs to some consumer group, so the group is the logical subscriber.
4) Broker: one Kafka server is one broker. A cluster consists of multiple brokers, and one broker can host multiple topics.
5) Topic: can be thought of as a queue; both producers and consumers work against a topic.
6) Partition: for scalability, a very large topic can be spread across multiple brokers (i.e., servers); a topic is divided into multiple partitions, each of which is an ordered queue.
7) Replica: to keep a partition's data available and Kafka working when the node holding it fails, Kafka provides replication; every partition of a topic has several replicas, one leader and several followers.
8) Leader: the "primary" among a partition's replicas; producers send data to, and consumers read data from, the leader.
9) Follower: a "secondary" among a partition's replicas; it continuously syncs data from the leader and stays in step with it. When the leader fails, one follower becomes the new leader.
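
One way to see consumer-group semantics in practice (a sketch of mine, using the console consumer's --group flag): run these in two terminals against the two-partition topic dunimo03 used in the tests below; each member of group "demo" is assigned one partition, so each message is printed by only one of them.

[root@kafka01 ~]# kafka-console-consumer.sh --bootstrap-server kafka01:9092 --topic dunimo03 --group demo
[root@kafka02 ~]# kafka-console-consumer.sh --bootstrap-server kafka01:9092 --topic dunimo03 --group demo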

 

Testing

# Create a topic
[root@kafka01 ~]# kafka-topics.sh --zookeeper kafka01:2181 --create --replication-factor 3 --partitions 1 --topic dunimo
Created topic "dunimo".
# List topics
[root@kafka01 ~]# kafka-topics.sh --zookeeper kafka01:2181 --list
dunimo
[root@kafka01 ~]# kafka-topics.sh --zookeeper kafka02:2181 --create --replication-factor 2 --partitions 2 --topic cdb_gk
[root@kafka01 ~]# kafka-topics.sh --zookeeper kafka01:2181 --list
cdb_gk
dunimo

# Produce messages
# (topics dunimo01–dunimo03 were created off-transcript in the same way as above)
[root@kafka02 ~]# kafka-console-producer.sh --broker-list kafka01:9092 --topic dunimo03
>hello
>yangdongyang
>
# Consume messages
[root@kafka01 ~]# kafka-console-consumer.sh --bootstrap-server kafka01:9092 --topic dunimo03
hello
yangdongyang
# Read all messages from the beginning (ordering is only guaranteed within a partition,
# which is why the two messages can come back in a different order)
[root@kafka03 ~]# kafka-console-consumer.sh --bootstrap-server kafka01:9092 --from-beginning --topic dunimo03
yangdongyang
hello

# Delete a topic
[root@kafka02 ~]# kafka-topics.sh --zookeeper kafka01:2181 --delete --topic dunimo01
Topic dunimo01 is marked for deletion.
Note: This will have no impact if delete.topic.enable is not set to true.
(delete.topic.enable=true was set in server.properties above, so the topic is actually removed)
[root@kafka02 ~]# kafka-topics.sh --zookeeper kafka01:2181 --list
__consumer_offsets
dunimo02
dunimo03

# Describe a topic in detail
[root@kafka02 ~]# kafka-topics.sh --zookeeper kafka01:2181 --describe --topic dunimo03
Topic:dunimo03  PartitionCount:2  ReplicationFactor:3  Configs:
        Topic: dunimo03  Partition: 0  Leader: 0  Replicas: 0,3,2  Isr: 0,3,2
        Topic: dunimo03  Partition: 1  Leader: 2  Replicas: 2,0,3  Isr: 2,0,3
# Increase the partition count of topic dunimo03
[root@kafka02 ~]# kafka-topics.sh --zookeeper kafka01:2181 --alter --topic dunimo03 --partitions 5
WARNING: If partitions are increased for a topic that has a key, the partition logic or ordering of the messages will be affected
Adding partitions succeeded!
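
Re-running --describe (my addition) should now report five partitions spread over brokers 0, 2, and 3:

[root@kafka02 ~]# kafka-topics.sh --zookeeper kafka01:2181 --describe --topic dunimo03 | head -1
# expected: Topic:dunimo03  PartitionCount:5  ReplicationFactor:3  Configs: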

 

 

 

 

Source: https://www.cnblogs.com/bjcdn/p/14972244.html