Configuration examples
Static inventory example for proxy mode
---
all:
  vars:
    ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
    cartridge_app_name: "tcs-app"
    tcs_v1_support: true
    tarantool_3_0_version_support: true
    tarantool_systemd_scope: user
    cartridge_systemd_dir: "{{ tarantool_systemd_userspace_dir }}"
    etcd_cluster_prefix: /cluster
    tarantool_etcd_host: "10.95.207.115"
    tarantool_config_etcd_endpoints:
      - http://10.95.207.113:2379
      - http://10.95.207.115:2379
      - http://10.95.207.117:2379
    tarantool_config_global:
      credentials:
        users:
          replicator:
            password: 'secret'
            roles: [replication]
            privileges:
              - permissions: [execute]
                functions: [failover.execute]
          client:
            password: 'secret'
            roles: [super]
      fiber:
        slice:
          err: 1000000
          warn: 1000000
      iproto:
        advertise:
          peer:
            login: 'replicator'
      failover:
        lease_interval: 15
        renew_interval: 9
        probe_interval: 1
      replication:
        failover: supervised
  children:
    storages:
      children:
        tcs_storages_r01:
      vars:
        tarantool_config_group:
          roles: [tcs_roles/storage, tcs_roles/stateboard, roles.metrics-export, tcs_roles/cpu]
    tcs_storages_r01:
      hosts:
        tcs-r01-s01:
        tcs-r01-s02:
      vars:
        replicaset_alias: storages
        tarantool_group_name: storages
        tarantool_config_replicaset:
          memtx:
            memory: 161167734630 #150Gb
          database:
            replicaset_uuid: 31b897a3-fce5-4e0e-896d-546659745d80
    schedulers:
      children:
        schedulers_r01:
      vars:
        tarantool_config_group:
          roles: [tcs_roles/scheduler, tcs_roles/stateboard, tcs_roles/cpu]
    schedulers_r01:
      hosts:
        tcs-r01-c01:
        tcs-r01-c02:
      vars:
        replicaset_alias: schedulers
        tarantool_group_name: schedulers
        tarantool_config_replicaset:
          memtx:
            memory: 5368709120 #5Gb
    tcs_storage_1:
      hosts:
        tcs-r01-s01:
          database:
            instance_uuid: a1e1ef05-fb23-46ff-aeef-bdb02e6c4cdd
          iproto:
            listen:
              - uri: "10.95.207.113:3311"
            advertise:
              client: "10.95.207.113:3311"
          labels:
            server: "10.95.207.113"
          roles_cfg:
            tcs_roles/storage:
              arrow_flight_sql:
                listen: "10.95.207.113:50051"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.113:50051"
              sharding:
                uri: "10.95.207.113:50051"
              http:
                enabled: true
                listen: "10.95.207.113:7777"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.113:7777"
            roles.metrics-export:
              http:
                - listen: 8681
                  endpoints:
                    - path: /metrics
                      format: prometheus
      vars:
        ansible_host: "10.95.207.113"
        ansible_port: 22
        ansible_user: '{{ super_user }}'
    scheduler_1:
      hosts:
        tcs-r01-c01:
          iproto:
            listen:
              - uri: "10.95.207.113:3334"
            advertise:
              client: "10.95.207.113:3334"
          labels:
            server: "10.95.207.113"
          roles_cfg:
            tcs_roles/scheduler:
              mode:
                proxy:
                  target_replicaset: storages
              arrow_flight_sql:
                listen: "10.95.207.113:50052"
                credentials:
                  username: tcs
                  password: tcs
              http:
                enabled: true
                listen: "10.95.207.113:7778"
                credentials:
                  username: tcs
                  password: tcs
      vars:
        ansible_host: "10.95.207.113"
        ansible_port: 22
        ansible_user: '{{ super_user }}'
    failover_coordinator_1:
      hosts:
        failover-coordinator-01:
          tarantool_coordinator: true
      vars:
        replicaset_alias: failover_coordinators
        ansible_host: "10.95.207.113"
        ansible_port: 22
        ansible_user: '{{ super_user }}'
    tcs_storage_2:
      hosts:
        tcs-r01-s02:
          database:
            instance_uuid: 003da004-041d-404a-8948-22cdde6fd25a
          iproto:
            listen:
              - uri: "10.95.207.115:3311"
            advertise:
              client: "10.95.207.115:3311"
          labels:
            server: "10.95.207.115"
          roles_cfg:
            tcs_roles/storage:
              arrow_flight_sql:
                listen: "10.95.207.115:50051"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.115:50051"
              sharding:
                uri: "10.95.207.115:50051"
              http:
                enabled: true
                listen: "10.95.207.115:7777"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.115:7777"
            roles.metrics-export:
              http:
                - listen: 8681
                  endpoints:
                    - path: /metrics
                      format: prometheus
      vars:
        ansible_host: "10.95.207.115"
        ansible_port: 22
        ansible_user: "{{ super_user }}"
    scheduler_2:
      hosts:
        tcs-r01-c02:
          iproto:
            listen:
              - uri: "10.95.207.115:3334"
            advertise:
              client: "10.95.207.115:3334"
          labels:
            server: "10.95.207.115"
          roles_cfg:
            tcs_roles/scheduler:
              mode:
                proxy:
                  target_replicaset: storages
              arrow_flight_sql:
                listen: "10.95.207.115:50052"
                credentials:
                  username: tcs
                  password: tcs
              http:
                enabled: true
                listen: "10.95.207.115:7778"
                credentials:
                  username: tcs
                  password: tcs
      vars:
        ansible_host: "10.95.207.115"
        ansible_port: 22
        ansible_user: "{{ super_user }}"
    failover_coordinator_2:
      hosts:
        failover-coordinator-02:
          tarantool_coordinator: true
      vars:
        replicaset_alias: failover_coordinators
        ansible_host: "10.95.207.115"
        ansible_port: 22
        ansible_user: "{{ super_user }}"
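Before running any playbooks against this inventory, it is worth checking that it parses and that group and host variables merge as expected. A minimal check with the standard ansible-inventory utility (hosts.yml is a placeholder for the path to the inventory file above):

# Print the group tree to verify the storages/schedulers hierarchy.
ansible-inventory -i hosts.yml --graph

# Dump the merged variables of a single instance to confirm that group vars
# (tarantool_config_group, tarantool_config_replicaset) and host vars
# (iproto, roles_cfg) combine as intended.
ansible-inventory -i hosts.yml --host tcs-r01-s01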
Static inventory example for sharding mode
---
all:
  vars:
    ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
    cartridge_app_name: "tcs-app"
    tcs_v1_support: true
    tarantool_3_0_version_support: true
    tarantool_systemd_scope: user
    cartridge_systemd_dir: "{{ tarantool_systemd_userspace_dir }}"
    etcd_cluster_prefix: /cluster
    tarantool_etcd_host: "10.95.207.115"
    tarantool_config_etcd_endpoints:
      - http://10.95.207.113:2379
      - http://10.95.207.115:2379
      - http://10.95.207.117:2379
    tarantool_config_global:
      credentials:
        users:
          replicator:
            password: 'secret'
            roles: [replication]
            privileges:
              - permissions: [execute]
                functions: [failover.execute]
          client:
            password: 'secret'
            roles: [super]
      fiber:
        slice:
          err: 1000000
          warn: 1000000
      iproto:
        advertise:
          peer:
            login: 'replicator'
      failover:
        lease_interval: 15
        renew_interval: 9
        probe_interval: 1
      replication:
        failover: supervised
  children:
    storages:
      children:
        tcs_storages_r01:
      vars:
        tarantool_config_group:
          roles: [tcs_roles/storage, tcs_roles/stateboard, roles.metrics-export, tcs_roles/cpu]
    tcs_storages_r01:
      hosts:
        tcs-r01-s01:
        tcs-r01-s02:
      vars:
        replicaset_alias: storages
        tarantool_group_name: storages
        tarantool_config_replicaset:
          memtx:
            memory: 161167734630 #150Gb
          database:
            replicaset_uuid: 31b897a3-fce5-4e0e-896d-546659745d80
    schedulers:
      children:
        schedulers_r01:
      vars:
        tarantool_config_group:
          roles: [tcs_roles/scheduler, tcs_roles/stateboard, tcs_roles/cpu]
    schedulers_r01:
      hosts:
        tcs-r01-c01:
        tcs-r01-c02:
      vars:
        replicaset_alias: schedulers
        tarantool_group_name: schedulers
    tcs_storage_1:
      hosts:
        tcs-r01-s01:
          database:
            instance_uuid: a1e1ef05-fb23-46ff-aeef-bdb02e6c4cdd
          iproto:
            listen:
              - uri: "10.95.207.113:3311"
            advertise:
              client: "10.95.207.113:3311"
          labels:
            server: "10.95.207.113"
          roles_cfg:
            tcs_roles/storage:
              enable_sharding: true
              arrow_flight_sql:
                listen: "10.95.207.113:50051"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.113:50051"
              sharding:
                uri: "10.95.207.113:50051"
              http:
                enabled: true
                listen: "10.95.207.113:7777"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.113:7777"
            roles.metrics-export:
              http:
                - listen: 8681
                  endpoints:
                    - path: /metrics
                      format: prometheus
      vars:
        ansible_host: "10.95.207.113"
        ansible_port: 22
        ansible_user: '{{ super_user }}'
    scheduler_1:
      hosts:
        tcs-r01-c01:
          iproto:
            listen:
              - uri: "10.95.207.113:3334"
            advertise:
              client: "10.95.207.113:3334"
          labels:
            server: "10.95.207.113"
          roles_cfg:
            tcs_roles/scheduler:
              mode:
                sharded:
                  bucket_count: 1000
              arrow_flight_sql:
                listen: "10.95.207.113:50052"
                credentials:
                  username: tcs
                  password: tcs
              http:
                enabled: true
                listen: "10.95.207.113:7778"
                credentials:
                  username: tcs
                  password: tcs
      vars:
        ansible_host: "10.95.207.113"
        ansible_port: 22
        ansible_user: '{{ super_user }}'
    failover_coordinator_1:
      hosts:
        failover-coordinator-01:
          tarantool_coordinator: true
      vars:
        replicaset_alias: failover_coordinators
        ansible_host: "10.95.207.113"
        ansible_port: 22
        ansible_user: '{{ super_user }}'
    tcs_storage_2:
      hosts:
        tcs-r01-s02:
          database:
            instance_uuid: 003da004-041d-404a-8948-22cdde6fd25a
          iproto:
            listen:
              - uri: "10.95.207.115:3311"
            advertise:
              client: "10.95.207.115:3311"
          labels:
            server: "10.95.207.115"
          roles_cfg:
            tcs_roles/storage:
              enable_sharding: true
              arrow_flight_sql:
                listen: "10.95.207.115:50051"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.115:50051"
              sharding:
                uri: "10.95.207.115:50051"
              http:
                enabled: true
                listen: "10.95.207.115:7777"
                credentials:
                  username: tcs
                  password: tcs
                advertise:
                  client: "10.95.207.115:7777"
            roles.metrics-export:
              http:
                - listen: 8681
                  endpoints:
                    - path: /metrics
                      format: prometheus
      vars:
        ansible_host: "10.95.207.115"
        ansible_port: 22
        ansible_user: "{{ super_user }}"
    scheduler_2:
      hosts:
        tcs-r01-c02:
          iproto:
            listen:
              - uri: "10.95.207.115:3334"
            advertise:
              client: "10.95.207.115:3334"
          labels:
            server: "10.95.207.115"
          roles_cfg:
            tcs_roles/scheduler:
              mode:
                sharded:
                  bucket_count: 1000
              arrow_flight_sql:
                listen: "10.95.207.115:50052"
                credentials:
                  username: tcs
                  password: tcs
              http:
                enabled: true
                listen: "10.95.207.115:7778"
                credentials:
                  username: tcs
                  password: tcs
      vars:
        ansible_host: "10.95.207.115"
        ansible_port: 22
        ansible_user: "{{ super_user }}"
    failover_coordinator_2:
      hosts:
        failover-coordinator-02:
          tarantool_coordinator: true
      vars:
        replicaset_alias: failover_coordinators
        ansible_host: "10.95.207.115"
        ansible_port: 22
        ansible_user: "{{ super_user }}"
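This inventory differs from the proxy-mode one only in the role configuration: the storage role additionally sets enable_sharding: true, and the scheduler mode block switches from proxy with target_replicaset to sharded with bucket_count. If both inventories are kept as separate files, a plain diff highlights exactly these lines (the file names below are placeholders):

# Compare the proxy-mode and sharded-mode inventories.
diff -u inventory-tcs-proxy.yml inventory-tcs-sharded.yml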
Example of running the etcd_3_0.yml playbook to publish the configuration to etcd
An example of publishing the TCS cluster configuration to etcd using the etcd_3_0.yml playbook:
export PATH_TO_INVENTORY="/ATE/1.1.0_tarantool/inventory-tcs"
export SUPER_USER_NAME=root
export DEPLOY_TOOL_VERSION_TAG=1.15.0
export PATH_TO_PRIVATE_KEY="/root/.ssh/id_rsa"
docker run --network host --rm \
  -v ${PATH_TO_PRIVATE_KEY}:/ansible/.ssh/id_private_key:Z \
  -v ${PATH_TO_INVENTORY}:/ansible/inventories/hosts.yml:Z \
  -e SUPER_USER_NAME=${SUPER_USER_NAME} \
  ansible-tarantool-enterprise:${DEPLOY_TOOL_VERSION_TAG} \
  ansible-playbook -i /ansible/inventories/hosts.yml \
    --extra-vars '{
      "ansible_ssh_private_key_file":"/ansible/.ssh/id_private_key",
      "super_user":"'${SUPER_USER_NAME}'",
      "tarantool_shared_become_user":"tarantool",
      "tarantool_shared_hosts":"tcs-r01-s01,tcs-r01-s02,tcs-r01-c01,tcs-r01-c02"
    }' \
    playbooks/etcd_3_0.yml
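After the playbook finishes, the cluster configuration should be stored in etcd under etcd_cluster_prefix (/cluster in the inventories above). A quick way to confirm that keys were written is to query any of the configured endpoints with etcdctl; the exact key layout under the prefix is created by the playbook, so this only verifies that the prefix is not empty:

# List the keys stored under the cluster prefix (etcd v3 API).
ETCDCTL_API=3 etcdctl \
  --endpoints=http://10.95.207.113:2379,http://10.95.207.115:2379,http://10.95.207.117:2379 \
  get --prefix /cluster --keys-only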
Example of running the tcs/install.yml playbook to deploy a cluster
An example of deploying a TCS cluster from scratch using the tcs/install.yml playbook:
export PATH_TO_INVENTORY="/ATE/1.1.0_tarantool/inventory-tcs"
export SUPER_USER_NAME=root
export DEPLOY_TOOL_VERSION_TAG=1.15.0
export PATH_TO_PRIVATE_KEY="/root/.ssh/id_rsa"
export PACKAGE_NAME=tarantool_column_store.x86_64.tar.gz
export PATH_TO_PACKAGE="/TCS/tarantool3"
docker run --network host --rm \
  -v ${PATH_TO_PRIVATE_KEY}:/ansible/.ssh/id_private_key:Z \
  -v ${PATH_TO_INVENTORY}:/ansible/inventories/hosts.yml:Z \
  -v ${PATH_TO_PACKAGE}/${PACKAGE_NAME}:/ansible/packages/${PACKAGE_NAME}:Z \
  -e SUPER_USER_NAME=${SUPER_USER_NAME} \
  -e PACKAGE_NAME=${PACKAGE_NAME} \
  ansible-tarantool-enterprise:${DEPLOY_TOOL_VERSION_TAG} \
  ansible-playbook -i /ansible/inventories/hosts.yml \
    --extra-vars '{
      "cartridge_package_path":"/ansible/packages/'${PACKAGE_NAME}'",
      "ansible_ssh_private_key_file":"/ansible/.ssh/id_private_key",
      "super_user":"'${SUPER_USER_NAME}'",
      "tarantool_shared_become_user":"tarantool",
      "tarantool_shared_hosts":"tcs-r01-s01,tcs-r01-s02,failover_coordinator_1,failover_coordinator_2,tcs-r01-c01,tcs-r01-c02"
    }' \
    playbooks/tcs/install.yml
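A simple post-deployment check that relies only on values from the inventory is to poll the Prometheus metrics endpoint exported by roles.metrics-export on the storage nodes; a successful response means the instance has started and is serving HTTP:

# Storage instances export metrics on port 8681 (see roles.metrics-export
# in the inventory); a non-empty response means the instance is up.
curl -sf http://10.95.207.113:8681/metrics | head -n 5
curl -sf http://10.95.207.115:8681/metrics | head -n 5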
Example of running the tcs/install.yml playbook to add an instance
An example of adding a Scheduler instance named tcs-r01-s03:
export PATH_TO_INVENTORY="/ATE/1.1.0_tarantool/inventory-tcs-common-etcd_sharded"
export SUPER_USER_NAME=root
export DEPLOY_TOOL_VERSION_TAG=1.15.0
export PATH_TO_PRIVATE_KEY="/root/.ssh/id_rsa"
export PACKAGE_NAME=tarantool_column_store.x86_64.tar.gz
export PATH_TO_PACKAGE="/TCS/tarantool"
docker run --network host --rm \
  -v ${PATH_TO_PRIVATE_KEY}:/ansible/.ssh/id_private_key:Z \
  -v ${PATH_TO_INVENTORY}:/ansible/inventories/hosts.yml:Z \
  -v ${PATH_TO_PACKAGE}/${PACKAGE_NAME}:/ansible/packages/${PACKAGE_NAME}:Z \
  -e SUPER_USER_NAME=${SUPER_USER_NAME} \
  -e PACKAGE_NAME=${PACKAGE_NAME} \
  ansible-tarantool-enterprise:${DEPLOY_TOOL_VERSION_TAG} \
  ansible-playbook -i /ansible/inventories/hosts.yml \
    --extra-vars '{
      "cartridge_package_path":"/ansible/packages/'${PACKAGE_NAME}'",
      "ansible_ssh_private_key_file":"/ansible/.ssh/id_private_key",
      "super_user":"'${SUPER_USER_NAME}'",
      "tarantool_shared_become_user":"tarantool",
      "tarantool_shared_hosts":"tcs-r01-s03"
    }' \
    playbooks/tcs/install.yml
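Before running the playbook, the new instance must already be described in the inventory file. Assuming ansible-inventory is available on the control host, the following confirms that the host resolves and picks up the intended group variables:

# Show the merged variables the new instance will be deployed with.
ansible-inventory -i ${PATH_TO_INVENTORY} --host tcs-r01-s03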
Example of running the uninstall.yml playbook to remove a cluster
An example of removing a deployed TCS cluster using the uninstall.yml playbook:
export SUPER_USER_NAME=root
export DEPLOY_TOOL_VERSION_TAG=1.15.0
export PATH_TO_PRIVATE_KEY="/root/.ssh/id_rsa"
export PATH_TO_INVENTORY="/ATE/1.1.0_tarantool/inventory-tcs"
echo "Uninstall Storages and Failover Coordinators"
echo "-------------------------------------------------------------------------"
sudo docker run --network host --rm \
  -v ${PATH_TO_PRIVATE_KEY}:/ansible/.ssh/id_private_key:Z \
  -v ${PATH_TO_INVENTORY}:/ansible/inventories/hosts.yml:Z \
  -e SUPER_USER_NAME=${SUPER_USER_NAME} \
  ansible-tarantool-enterprise:${DEPLOY_TOOL_VERSION_TAG} \
  ansible-playbook -i /ansible/inventories/hosts.yml \
    --extra-vars '{
      "ansible_ssh_private_key_file":"/ansible/.ssh/id_private_key",
      "super_user":"'${SUPER_USER_NAME}'",
      "tarantool_shared_become_user":"tarantool",
      "confirm_deletion":false
    }' \
    playbooks/uninstall.yml --tags 'tarantool' --limit 'failover-coordinator-01, failover-coordinator-02, tcs-r01-s01, tcs-r01-s02, tcs-r01-c01, tcs-r01-c02'
echo "-----------------------------------Done----------------------------------"