赞
踩
vim /etc/hostname
dn01
vim /etc/hostname
dn02
vim /etc/hostname
dn03
vim /etc/hostname
dn04
vim /etc/hostname
dn05
vim /etc/hostname
dn06
使用命令使其立刻生效
[root@dn01 ~]# hostname $(cat /etc/hostname)
[root@dn02 ~]# hostname $(cat /etc/hostname)
[root@dn03 ~]# hostname $(cat /etc/hostname)
[root@dn04 ~]# hostname $(cat /etc/hostname)
[root@dn05 ~]# hostname $(cat /etc/hostname)
[root@dn06 ~]# hostname $(cat /etc/hostname)
ssh-keygen -t rsa
ssh-copy-id -i /root/.ssh/id_rsa.pub dn01
ssh-copy-id -i /root/.ssh/id_rsa.pub dn02
ssh-copy-id -i /root/.ssh/id_rsa.pub dn03
ssh-copy-id -i /root/.ssh/id_rsa.pub dn04
ssh-copy-id -i /root/.ssh/id_rsa.pub dn05
ssh-copy-id -i /root/.ssh/id_rsa.pub dn06
clickhouse-client-20.10.3.30-2.noarch.rpm
clickhouse-common-static-20.10.3.30-2.x86_64.rpm
clickhouse-server-20.10.3.30-2.noarch.rpm
zookeeper-3.4.6.tar.gz
http://repo.yandex.ru/clickhouse/rpm/lts/x86_64/
tar -zxvf zookeeper-3.4.6.tar.gz -C /opt
cp /opt/zookeeper-3.4.6/conf/zoo_sample.cfg /opt/zookeeper-3.4.6/conf/zoo.cfg
mkdir -p /data01/zookeeper-3.4.6/dataDir
mkdir -p /data01/zookeeper-3.4.6/dataLogDir
dn01服务器
vim /data01/zookeeper-3.4.6/dataDir/myid
1
dn02服务器
vim /data01/zookeeper-3.4.6/dataDir/myid
2
dn03服务器
vim /data01/zookeeper-3.4.6/dataDir/myid
3
vim /opt/zookeeper-3.4.6/conf/zoo.cfg
# The number of milliseconds of each tick
tickTime=12000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data01/zookeeper-3.4.6/dataDir
dataLogDir=/data01/zookeeper-3.4.6/dataLogDir
forceSync=no
# the port at which the clients will connect
clientPort=2181
server.1=dn01:2888:3888
server.2=dn02:2888:3888
server.3=dn03:2888:3888
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
vim /etc/profile
export ZK_HOME=/opt/zookeeper-3.4.6
export PATH=$PATH:$ZK_HOME/bin
source /etc/profile
zkServer.sh start
zkServer.sh status
【优先安装】
rpm -ivh clickhouse-common-static-20.10.3.30-2.x86_64.rpm
【再次安装】
rpm -ivh clickhouse-server-20.10.3.30-2.noarch.rpm
rpm -ivh clickhouse-client-20.10.3.30-2.noarch.rpm
[root@dn01 soft]# useradd -m clickhouse
[root@dn02 soft]# useradd -m clickhouse
[root@dn03 soft]# useradd -m clickhouse
[root@dn04 soft]# useradd -m clickhouse
[root@dn05 soft]# useradd -m clickhouse
[root@dn06 soft]# useradd -m clickhouse
mkdir -p /data01/clickhouse/{dataspace,tmpspace,log,ckdata}
mkdir -p /data02/clickhouse/ckdata
mkdir -p /data03/clickhouse/ckdata
mkdir -p /data01/clickhouse/log/clickhouse-server
cd /data01/
chown -R clickhouse:clickhouse clickhouse
cd /data02/
chown -R clickhouse:clickhouse clickhouse
cd /data03/
chown -R clickhouse:clickhouse clickhouse
日志文件目录
<log>/data01/clickhouse/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/data01/clickhouse/log/clickhouse-server/clickhouse-server.err.log</errorlog>
更改为9040端口号(如果9000端口已被占用需更改)
<tcp_port>9040</tcp_port>
更改监听让所有正常ip可连接,如果ipv6未开放,则需要将其注释
<!--<listen_host>::1</listen_host>-->
<listen_host>0.0.0.0</listen_host>
<listen_host>::1</listen_host>
设置默认数据存储目录
<path>/data01/clickhouse/dataspace/</path>
<tmp_path>/data01/clickhouse/tmpspace/</tmp_path>
设置用户目录
<user_files_path>/data01/clickhouse/user_files/</user_files_path>
集群配置文件(分片及用户&密码设置)
<include_from>/etc/clickhouse-server/metrika.xml</include_from>
以下为/etc/clickhouse-server/config.xml文件具体内容:
<?xml version="1.0"?> <yandex> <logger> <level>trace</level> <log>/data01/clickhouse/log/clickhouse-server/clickhouse-server.log</log> <errorlog>/data01/clickhouse/log/clickhouse-server/clickhouse-server.err.log</errorlog> <size>1000M</size> <count>10</count> </logger> <send_crash_reports> <enabled>false</enabled> <anonymize>false</anonymize> <endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint> </send_crash_reports> <http_port>8123</http_port> <tcp_port>9040</tcp_port> <mysql_port>9004</mysql_port> <openSSL> <server> <certificateFile>/etc/clickhouse-server/server.crt</certificateFile> <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile> <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile> <verificationMode>none</verificationMode> <loadDefaultCAFile>true</loadDefaultCAFile> <cacheSessions>true</cacheSessions> <disableProtocols>sslv2,sslv3</disableProtocols> <preferServerCiphers>true</preferServerCiphers> </server> <client> <loadDefaultCAFile>true</loadDefaultCAFile> <cacheSessions>true</cacheSessions> <disableProtocols>sslv2,sslv3</disableProtocols> <preferServerCiphers>true</preferServerCiphers> <invalidCertificateHandler> <name>RejectCertificateHandler</name> </invalidCertificateHandler> </client> </openSSL> <interserver_http_port>9009</interserver_http_port> <listen_host>0.0.0.0</listen_host> <!--<listen_host>::1</listen_host>--> <max_connections>4096</max_connections> <keep_alive_timeout>3</keep_alive_timeout> <max_concurrent_queries>100</max_concurrent_queries> <max_server_memory_usage>0</max_server_memory_usage> <max_thread_pool_size>10000</max_thread_pool_size> <max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio> <total_memory_profiler_step>4194304</total_memory_profiler_step> <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability> <uncompressed_cache_size>8589934592</uncompressed_cache_size> <mark_cache_size>5368709120</mark_cache_size> 
<path>/data01/clickhouse/dataspace/</path> <tmp_path>/data01/clickhouse/tmpspace/</tmp_path> <user_files_path>/data01/clickhouse/user_files/</user_files_path> <user_directories> <users_xml> <path>users.xml</path> </users_xml> <local_directory> <path>/var/lib/clickhouse/access/</path> </local_directory> </user_directories> <ldap_servers> </ldap_servers> <default_profile>default</default_profile> <custom_settings_prefixes></custom_settings_prefixes> <default_database>default</default_database> <mlock_executable>true</mlock_executable> <remap_executable>false</remap_executable> <include_from>/etc/clickhouse-server/metrika.xml</include_from> <remote_servers incl="clickhouse_remote_servers" > </remote_servers> <remote_url_allow_hosts> </remote_url_allow_hosts> <zookeeper incl="zookeeper-servers" optional="true" /> <macros incl="macros" optional="true" /> <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval> <max_session_timeout>3600</max_session_timeout> <default_session_timeout>60</default_session_timeout> <part_log> <database>system</database> <table>part_log</table> <flush_interval_milliseconds>7500</flush_interval_milliseconds> </part_log> <query_log> <database>system</database> <table>query_log</table> <partition_by>toYYYYMM(event_date)</partition_by> <flush_interval_milliseconds>7500</flush_interval_milliseconds> </query_log> <trace_log> <database>system</database> <table>trace_log</table> <partition_by>toYYYYMM(event_date)</partition_by> <flush_interval_milliseconds>7500</flush_interval_milliseconds> </trace_log> <query_thread_log> <database>system</database> <table>query_thread_log</table> <partition_by>toYYYYMM(event_date)</partition_by> <flush_interval_milliseconds>7500</flush_interval_milliseconds> </query_thread_log> <metric_log> <database>system</database> <table>metric_log</table> <flush_interval_milliseconds>7500</flush_interval_milliseconds> <collect_interval_milliseconds>1000</collect_interval_milliseconds> </metric_log> 
<asynchronous_metric_log> <database>system</database> <table>asynchronous_metric_log</table> <flush_interval_milliseconds>60000</flush_interval_milliseconds> </asynchronous_metric_log> <crash_log> <database>system</database> <table>crash_log</table> <partition_by /> <flush_interval_milliseconds>1000</flush_interval_milliseconds> </crash_log> <dictionaries_config>*_dictionary.xml</dictionaries_config> <compression incl="clickhouse_compression"> </compression> <distributed_ddl> <path>/data01/clickhouse/task_queue/ddl</path> </distributed_ddl> <graphite_rollup_example> <pattern> <regexp>click_cost</regexp> <function>any</function> <retention> <age>0</age> <precision>3600</precision> </retention> <retention> <age>86400</age> <precision>60</precision> </retention> </pattern> <default> <function>max</function> <retention> <age>0</age> <precision>60</precision> </retention> <retention> <age>3600</age> <precision>300</precision> </retention> <retention> <age>86400</age> <precision>3600</precision> </retention> </default> </graphite_rollup_example> <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path> </yandex>
配置3分片2复制集群模式
<shard>
    <internal_replication>true</internal_replication>
    <replica>
        <host>dn01</host>
        <port>9040</port>
        <user>default</user>
        <password>Test2023</password>
        <user>ck_test</user>
        <password>Test2023</password>
    </replica>
    <replica>
        <host>dn02</host>
        <port>9040</port>
        <user>default</user>
        <password>Test2023</password>
        <user>ck_test</user>
        <password>Test2023</password>
    </replica>
</shard>
zookeeper配置
<zookeeper-servers>
    <node index="1">
        <host>dn01</host>
        <port>2181</port>
    </node>
    <node index="2">
        <host>dn02</host>
        <port>2181</port>
    </node>
    <node index="3">
        <host>dn03</host>
        <port>2181</port>
    </node>
</zookeeper-servers>
macros配置(每台服务器配置有所不同,按照3分片2复制排序配置):
dn01
<macros>
<cluster>ck_cluster</cluster>
<shard>01</shard>
<replica>01</replica>
</macros>
dn02
<macros>
<cluster>ck_cluster</cluster>
<shard>01</shard>
<replica>02</replica>
</macros>
dn03
<macros>
<cluster>ck_cluster</cluster>
<shard>02</shard>
<replica>01</replica>
</macros>
dn04
<macros>
<cluster>ck_cluster</cluster>
<shard>02</shard>
<replica>02</replica>
</macros>
dn05
<macros>
<cluster>ck_cluster</cluster>
<shard>03</shard>
<replica>01</replica>
</macros>
dn06
<macros>
<cluster>ck_cluster</cluster>
<shard>03</shard>
<replica>02</replica>
</macros>
以下为dn01 /etc/clickhouse-server/metrika.xml配置文件
<yandex> <clickhouse_remote_servers> <ck_cluster> <shard> <internal_replication>true</internal_replication> <replica> <host>dn01</host> <port>9040</port> <user>default</user> <password>Test2023</password> <user>ck_test</user> <password>Test2023</password> </replica> <replica> <host>dn02</host> <port>9040</port> <user>default</user> <password>Test2023</password> <user>ck_test</user> <password>Test2023</password> </replica> </shard> <shard> <internal_replication>true</internal_replication> <replica> <host>dn03</host> <port>9040</port> <user>default</user> <password>Test2023</password> <user>ck_test</user> <password>Test2023</password> </replica> <replica> <host>dn04</host> <port>9040</port> <user>default</user> <password>Test2023</password> <user>ck_test</user> <password>Test2023</password> </replica> </shard> <shard> <internal_replication>true</internal_replication> <replica> <host>dn05</host> <port>9040</port> <user>default</user> <password>Test2023</password> <user>ck_test</user> <password>Test2023</password> </replica> <replica> <host>dn06</host> <port>9040</port> <user>default</user> <password>Test2023</password> <user>ck_test</user> <password>Test2023</password> </replica> </shard> </ck_cluster> </clickhouse_remote_servers> <zookeeper-servers> <node index="1"> <host>dn01</host> <port>2181</port> </node> <node index="2"> <host>dn02</host> <port>2181</port> </node> <node index="3"> <host>dn03</host> <port>2181</port> </node> </zookeeper-servers> <networks> <ip>::/0</ip> </networks> <clickhouse_compression> <case> <min_part_size>10000000000</min_part_size> <min_part_size_ratio>0.01</min_part_size_ratio> <method>lz4</method> </case> </clickhouse_compression> <macros> <cluster>ck_cluster</cluster> <shard>01</shard> <replica>01</replica> </macros> </yandex>
配置default用户最大执行内存,最小执行内存 <max_memory_usage>80000000000</max_memory_usage> <max_memory_usage_for_all_queries>360000000000</max_memory_usage_for_all_queries> 配置只读权限用户 <max_memory_usage>60000000000</max_memory_usage> <max_memory_usage_for_all_queries>360000000000</max_memory_usage_for_all_queries> 配置default用户并用sha256生成密码:echo -n XXXXX | openssl dgst -sha256 <default> <password_sha256_hex>412baa28cbad46cbbb355d9d5baec5bf849d70053e83e99f0ad19475c50c6817</password_sha256_hex> <networks incl="networks" replace="replace"> <ip>::/0</ip> </networks> <profile>default</profile> <quota>default</quota> </default> 新增ck_test用户只读权限 <ck_test> <password_sha256_hex>febbeb3effab1c9028fc7d1e1f42d304b01e65c00244e7000e2610ebbc88fa75</password_sha256_hex> <networks incl="networks" replace="replace"> <ip>::/0</ip> </networks> <profile>readonly</profile> <quota>default</quota> </ck_test>
以下为dn01 /etc/clickhouse-server/users.xml配置文件
<?xml version="1.0"?> <yandex> <!-- Profiles of settings. --> <profiles> <!-- Default settings. --> <default> <!-- Maximum memory usage for processing single query, in bytes. --> <max_memory_usage>80000000000</max_memory_usage> <max_memory_usage_for_all_queries>360000000000</max_memory_usage_for_all_queries> <!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. --> <use_uncompressed_cache>0</use_uncompressed_cache> <!-- How to choose between replicas during distributed query processing. random - choose random replica from set of replicas with minimum number of errors nearest_hostname - from set of replicas with minimum number of errors, choose replica with minimum number of different symbols between replica's hostname and local hostname (Hamming distance). in_order - first live replica is chosen in specified order. first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors. --> <load_balancing>random</load_balancing> <constraints> <max_memory_usage> <min>100000</min> <max>10000000000</max> </max_memory_usage> <force_index_by_date> <readonly/> </force_index_by_date> </constraints> </default> <!-- Profile that allows only read queries. --> <readonly> <max_memory_usage>60000000000</max_memory_usage> <max_memory_usage_for_all_queries>360000000000</max_memory_usage_for_all_queries> <use_uncompressed_cache>0</use_uncompressed_cache> <max_replica_delay_for_distributed_queries>0</max_replica_delay_for_distributed_queries> <load_balancing>random</load_balancing> <readonly>1</readonly> </readonly> </profiles> <!-- Users and ACL. 
-->
<users>
    <default>
        <password_sha256_hex>412baa28cbad46cbbb355d9d5baec5bf849d70053e83e99f0ad19475c50c6817</password_sha256_hex>
        <networks incl="networks" replace="replace">
            <ip>::/0</ip>
        </networks>
        <profile>default</profile>
        <quota>default</quota>
    </default>
    <ck_test>
        <password_sha256_hex>febbeb3effab1c9028fc7d1e1f42d304b01e65c00244e7000e2610ebbc88fa75</password_sha256_hex>
        <networks incl="networks" replace="replace">
            <ip>::/0</ip>
        </networks>
        <profile>readonly</profile>
        <quota>default</quota>
    </ck_test>
</users>
<!-- Quotas. -->
<quotas>
    <!-- Name of quota. -->
    <default>
        <!-- Limits for time interval. You could specify many intervals with different limits. -->
        <interval>
            <!-- Length of interval. -->
            <duration>3600</duration>
            <!-- No limits. Just calculate resource usage for time interval. -->
            <queries>0</queries>
            <errors>0</errors>
            <result_rows>0</result_rows>
            <read_rows>0</read_rows>
            <execution_time>0</execution_time>
        </interval>
    </default>
</quotas>
</yandex>
存储数据库数据目录及磁盘空间剩余空间配置,keep_free_space_bytes节点表示触发清除旧数据的临界值,即当磁盘空间剩余107374182400字节时,ClickHouse将删除旧数据文件以释放磁盘空间。这可以防止磁盘空间不足而导致数据丢失。
<path>/data01/clickhouse/ckdata/</path>
<keep_free_space_bytes>107374182400</keep_free_space_bytes>
ClickHouse存储策略
<policies>
<ck_jbod>
<volumes>
<jbod_volume>
<disk>data01</disk>
<disk>data02</disk>
<disk>data03</disk>
</jbod_volume>
</volumes>
</ck_jbod>
</policies>
定义了一个名为“ck_jbod”的ClickHouse存储策略,该策略使用JBOD (Just a Bunch Of Disks)配置。
具体来说,它定义了一个名为“jbod_volume”的卷,它横跨三个磁盘:“data01”、“data02”和“data03”。此策略可用于跨多个磁盘分发数据,以提高性能和实现冗余。
启动clickhouse服务
sudo systemctl start clickhouse-server
查看clickhouse服务启动状态
sudo systemctl status clickhouse-server
停止clickhouse服务
sudo systemctl stop clickhouse-server
重新启动clickhouse服务
sudo systemctl restart clickhouse-server
测试每台服务器clickhouse-client 命令行是否登录,如果有异常查看日志
clickhouse-client --port 9040 -u default --password Test2023
<Error> Application: DB::Exception: Listen [::1]:8123 failed: Poco::Exception. Code: 1000, e.code() = 99, e.displayText() = Net Exception: Cannot assign requested address: [::1]:8123 (version 20.10.3.30 (official build))
原因:这个错误通常表示ClickHouse服务器在尝试侦听IP地址"[::1]:8123"时发生故障。 "[::1]"是IPv6地址,表示localhost(127.0.0.1)的IPv6地址。
解决方法:
a.检查端口占用情况, 如果该端口已被占用,则需要找到哪个进程使用该端口,并确保关闭该进程或使用其他端口。
lsof -i :8123
b.检查网络配置尝试通过ping命令测试网络连接,确保localhost正确映射到IPv6地址[::1]和IPv4地址127.0.0.1
ping localhost
ping6 ::1
ping 127.0.0.1
c.检查ClickHouse配置文件中是否正确配置了网络设置
<listen_host>::1</listen_host>
<listen_host>127.0.0.1</listen_host>
d.禁用IPv6:如果IPv6不是必需的,可以禁用它。检查/etc/sysctl.conf文件,确认是否已经禁用,1表示禁用
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
e.用于重新加载sysctl.conf
sysctl -p
f.注释/etc/clickhouse-server/config.xml文件ipv6相关配置
<!--<listen_host>::1</listen_host>-->
报错信息:
ClickHouse client version 20.10.3.30 (official build).
Connecting to localhost:9040 as user ck_dev.
Code: 210. DB::NetException: Connection refused (localhost:9040)
原因:未禁用ipv6
解决方法:
a.禁用ipv6,编辑文件/etc/sysctl.conf,添加以下内容
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
b.用于重新加载sysctl.conf
sysctl -p
c.重启clickhouse服务
sudo systemctl restart clickhouse-server
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。