Inventory examples¶
This page collects example inventory.yml files for the various products used in tests. Each file shows a sample inventory configuration for the corresponding product.
A static inventory is a manually written text file that lists hosts, groups, and variables.
A dynamic inventory is a file for an Ansible inventory plugin (for example, the generator plugin) that builds the list of hosts, groups, and variables dynamically from the data it is given.
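For example, a minimal static inventory looks roughly like this (a schematic sketch: the group name, host name, address, and user below are placeholders rather than values from the examples on this page):
---
my-cluster:
  hosts:
    instance-01:
      ansible_host: 192.168.0.10  # address the host is reachable at
  vars:
    ansible_user: deploy  # SSH user shared by the whole group
Either kind of inventory can be inspected with ansible-inventory -i inventory.yml --list, which prints the resulting hosts, groups, and variables.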
Tarantool 3.x¶
Scenarios for applications and products based on Tarantool 3.x Enterprise Edition are described in the Tarantool 3.x section. An example inventory for the Tarantool-based centralized configuration storage is given in the Tarantool Config Storage section.
Static inventory¶
---
tarantool:
children:
ROUTERS:
children:
router:
vars:
tarantool_config_group:
app:
module: router
sharding:
roles: [router]
roles:
- roles.another-role
STORAGES:
children:
storage-1:
storage-2:
vars:
tarantool_config_group:
app:
module: storage
cfg:
space_name: super-duper-space
sharding:
roles: [storage]
memtx:
memory: 1000241024
roles:
- roles.my-role
storage-1:
hosts:
kvee-r01-s01:
kvee-r01-s02:
vars:
replicaset_alias: storage-1
tarantool_config_replicaset:
memtx:
memory: 512000000
storage-2:
hosts:
kvee-r02-s01:
kvee-r02-s02:
vars:
replicaset_alias: storage-2
router:
hosts:
kvee-router-01:
vars:
replicaset_alias: router-1
vm_1:
hosts:
kvee-r01-s01:
iproto:
listen:
- uri: 127.0.0.1:3310
advertise:
client: 127.0.0.1:3310
labels:
server: "{{ ansible_host }}"
kvee-r02-s01:
iproto:
listen:
- uri: 127.0.0.1:3311
advertise:
client: 127.0.0.1:3311
labels:
server: "{{ ansible_host }}"
kvee-router-01:
iproto:
listen:
- uri: 127.0.0.1:3312
advertise:
client: 127.0.0.1:3312
labels:
server: "{{ ansible_host }}"
kvee-r01-s02:
labels:
server: "{{ ansible_host }}"
kvee-r02-s02:
labels:
server: "{{ ansible_host }}"
vars:
ansible_host: 127.0.0.1
ansible_user: "{{ super_user }}"
vars:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: kvee
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tarantool_config_global:
fiber:
slice:
err: 15
credentials:
users:
client:
password: 'secret'
roles: [super]
replicator:
password: 'secret'
roles: [replication]
storage:
password: 'secret'
roles: [sharding]
roles_cfg:
my-role:
variable_1: 10
variable_2: my-user
another-role:
welcome_message: "Hello there, general Kenobi!"
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
client: 'unix/:{{ cartridge_run_dir }}/{% raw %}{{ instance_name }}{% endraw %}.iproto'
listen:
- uri: unix/:/app/tarantool/kvee/run/{% raw %}{{ instance_name }}{% endraw %}.iproto
Dynamic inventory¶
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: tarantool
# distribution_strategy: StrategyEvenByZone
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: kvee
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tarantool_wait_alive_delay: 2
tarantool_wait_alive_retries: 50
tarantool_config_etcd_endpoints:
- http://{{ tarantool_etcd_host }}:{{ tarantool_etcd_port }}
- http://etcd-molecule.molecule:{{ tarantool_etcd_port }}
tarantool_config_global:
fiber:
slice:
err: 15
credentials:
users:
client:
password: 'secret'
roles: [super]
replicator:
password: 'secret'
roles: [replication]
storage:
password: 'secret'
roles: [sharding]
roles_cfg:
my-role:
variable_1: 10
variable_2: my-user
another-role:
welcome_message: "Hello there, general Kenobi!"
compat:
box_error_serialize_verbose: "new"
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
client: 'unix/:{{ cartridge_run_dir }}/{% raw %}{{ instance_name }}{% endraw %}.iproto'
listen:
- uri: unix/:/app/tarantool/kvee/run/{% raw %}{{ instance_name }}{% endraw %}.iproto
servers:
- name: 'vm_1'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
- name: 'vm_2'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
zone: 'DC5'
user: '{{ super_user }}'
port_starts:
iproto: 3401
http: 9091
# - name: 'vm_3'
# host: 127.0.0.1
# advertise_host: '127.0.0.1'
# port: 2201
# zone: 'DC5'
# user: '{{ super_user }}'
# port_starts:
# iproto: 3444
# http: 9092
components:
- name: storage
replicasets: 2
replicas: 2
config:
replicaset:
memtx:
memory: 512000000
group:
app:
module: storage
cfg:
space_name: super-duper-space
sharding:
roles: [storage]
memtx:
memory: 1000241024
roles:
- roles.my-role
- name: router
replicasets: 2
replicas: 1
config:
group:
app:
module: router
sharding:
roles: [router]
roles:
- roles.another-role
# changes:
# - type: set_variables
# hosts:
# - router
# - storage-r02-i02
# values:
# example: test
Tarantool Cartridge¶
Scenarios for applications and products based on Tarantool Cartridge are described in the Tarantool Cartridge section.
Static inventory¶
---
tnt-cluster-1:
children:
ROUTERS:
hosts:
kvee-router-01:
kvee-router-02:
STORAGES:
hosts:
kvee-r01-s01:
kvee-r01-s02:
kvee-r02-s01:
kvee-r02-s02:
STORAGES_GROUP_1:
hosts:
kvee-r01-s01:
kvee-r02-s01:
STORAGES_GROUP_2:
hosts:
kvee-r01-s02:
kvee-r02-s02:
replicaset_vag_router_01:
hosts:
kvee-router-01:
vars:
failover_priority:
- kvee-router-01
replicaset_alias: vag-router-01
roles:
- vshard-router
- crud-router
- failover-coordinator
- metrics
replicaset_vag_router_02:
hosts:
kvee-router-02:
vars:
failover_priority:
- kvee-router-02
replicaset_alias: vag-router-02
roles:
- vshard-router
- crud-router
- failover-coordinator
- metrics
replicaset_vag_storage_01:
hosts:
kvee-r01-s01:
kvee-r01-s02:
vars:
failover_priority:
- kvee-r01-s02
- kvee-r01-s01
replicaset_alias: vag-storage-01
roles:
- vshard-storage
- crud-storage
- metrics
- expirationd
replicaset_vag_storage_02:
hosts:
kvee-r02-s01:
kvee-r02-s02:
vars:
failover_priority:
- kvee-r02-s02
- kvee-r02-s01
replicaset_alias: vag-storage-02
roles:
- vshard-storage
- crud-storage
- metrics
- expirationd
vm_1:
hosts:
kvee-r01-s01:
kvee-r02-s01:
kvee-router-01:
kvee-router-02:
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
vm_2:
hosts:
kvee-r01-s02:
kvee-r02-s02:
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
hosts:
kvee-r01-s01:
config:
advertise_uri: localhost:3401
http_port: 8080
log: /app/logs/kvee/kvee-r01-s01.log
vinyl_cache: 0
vinyl_memory: 0
cartridge_extra_env:
EXAMPLE: true
cartridge_systemd_dir: "{{ tarantool_systemd_userspace_dir }}"
kvee-r01-s02:
config:
advertise_uri: localhost:3410
http_port: 8099
log: /app/logs/kvee/kvee-r01-s02.log
vinyl_cache: 0
vinyl_memory: 0
kvee-r02-s01:
config:
advertise_uri: localhost:3411
http_port: 8098
log: /app/logs/kvee/kvee-r02-s01.log
vinyl_cache: 0
vinyl_memory: 0
kvee-r02-s02:
config:
advertise_uri: localhost:3402
http_port: 8092
log: /app/logs/kvee/kvee-r02-s02.log
vinyl_cache: 0
vinyl_memory: 0
kvee-router-01:
config:
advertise_uri: localhost:3403
http_port: 8093
log: /app/logs/kvee/kvee-router-01.log
vinyl_cache: 0
vinyl_memory: 0
kvee-router-02:
config:
advertise_uri: localhost:3404
http_port: 8094
log: /app/logs/kvee/kvee-router-02.log
vinyl_cache: 0
vinyl_memory: 0
vars:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_directory: /app/tarantool/kvee
cartridge_app_name: kvee
cartridge_keep_num_latest_dists: 10
cartridge_cluster_cookie: molecule-testing
cartridge_failover_params:
etcd2_params:
endpoints:
- http://{{ tarantool_etcd_host }}:2379
lock_delay: 10
prefix: /kvee-1
failover_timeout: 20
fencing_enabled: true
fencing_pause: 2
fencing_timeout: 10
mode: stateful
state_provider: etcd2
Dynamic inventory¶
# inventory.config file in YAML format
# remember to enable this inventory plugin in the ansible.cfg before using
# View the output using `ansible-inventory -i inventory.config --list`
plugin: tarantool.enterprise.generator
product: cartridge
cluster_name: tnt-cluster-1
constants:
tarantool_collected_logs_local_path: /tmp/logs
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_directory: /app/tarantool/kvee
cartridge_app_name: kvee
cartridge_keep_num_latest_dists: 10
cartridge_cluster_cookie: molecule-testing
cartridge_failover_params:
etcd2_params:
endpoints:
- http://{{ tarantool_etcd_host }}:2379
lock_delay: 10
prefix: /kvee-1
failover_timeout: 20
fencing_enabled: true
fencing_pause: 2
fencing_timeout: 10
mode: stateful
state_provider: etcd2
servers:
- name: 'vm_1'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
zone: 'DC2'
port_starts:
iproto: 3444
http: 8091
- name: 'vm_2'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
zone: 'DC5'
user: '{{ super_user }}'
port_starts:
iproto: 3401
http: 9091
zones:
- name: 'DC2'
distances:
DC2: 0
DC5: 2
- name: 'DC5'
distances:
DC2: 2
DC5: 0
components:
- name: storage
replicasets: 2
replicas: 2
config:
roles:
- vshard-storage
- crud-storage
- metrics
- expirationd
vinyl_cache: 0
vinyl_memory: 0
- name: router
replicasets: 2
replicas: 1
config:
roles:
- vshard-router
- crud-router
- failover-coordinator
- metrics
vinyl_cache: 0
vinyl_memory: 0
memtx_memory: 1024
changes:
- type: set_variables
hosts:
- storage-r01-i01
values:
cartridge_extra_env:
EXAMPLE: true
cartridge_systemd_dir: "{{ tarantool_systemd_userspace_dir }}"
# vinyl_cache: 1
# vinyl_memory: 2
# - type: remove_instances
# hosts:
# - kvee-app-r001-s01
# - kvee-app-r002-s02
# - type: add_replicasets
# component: storage
# quantity: 2
# - type: add_instances
# hosts:
# - 'vm_1'
# - 'vm_2'
# component: storage
# quantity: 2
Tarantool Config Storage¶
The Tarantool-based centralized configuration storage (Tarantool Config Storage) is used with clusters based on Tarantool 3.x.
Dynamic inventory¶
Added in version 1.10.0.
plugin: tarantool.enterprise.generator
cluster_name: tarantool_config_storage
product: tarantool
# distribution_strategy: StrategyEvenByZone
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: config_storage
tarantool_config_storage: 'tarantool'
is_config_storage: true
tarantool_config_global:
credentials:
users:
replicator:
password: 'secret'
roles: [replication]
config-storage:
password: '123456'
privileges:
- permissions: [read, write]
spaces: [config_storage, config_storage_meta]
- permissions: [execute]
universe: true
roles_cfg:
config.storage:
status_check_interval: 3
database:
use_mvcc_engine: true
iproto:
advertise:
peer:
login: replicator
servers:
- name: 'vm_1'
host: '{{ tarantool_etcd_host }}'
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
port_starts:
iproto: 4401
http: 9091
components:
- name: config-storage
replicasets: 1
replicas: 1
config:
replicaset:
replication:
failover: election
group:
roles:
- config.storage
Tarantool Cluster Manager¶
Scenarios for working with TCM are described in the Tarantool Cluster Manager section.
Configuration with ETCD¶
---
cluster-manager:
hosts:
tcm:
tcm_host: 0.0.0.0
tcm_port: 8080
tcm_etcd_host: etcd
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
tcm_feature_ttgraph: true
tcm_feature_column_store: true
tcm_feature_tqe: true
tcm_feature_api_token: true
tcm_bootstrap_password: "tarantool"
tcm_bootstrap_api_token: "tarantool"
tcm_initial_clusters:
- name: Tarantool cluster
description: Example cluster
storage-connection:
provider: etcd
etcd-connection:
endpoints:
- http://etcd:2379
username: ""
password: ""
prefix: "/tarantool/tcs"
tarantool-connection:
username: "client"
password: "secret"
- name: Tarantool cluster 2
description: Example cluster
storage-connection:
provider: etcd
etcd-connection:
endpoints:
- http://etcd:2379
username: ""
password: ""
prefix: "/tarantool/tcs"
tarantool-connection:
username: "client"
password: "secret"
Configuration with ETCD (SSL enabled)¶
---
cluster-manager:
hosts:
tcm:
tcm_config_etcd_endpoints:
- "{{ tarantool_etcd_schema_definition }}://{{ tarantool_etcd_host }}:2379"
tcm_host: 0.0.0.0
tcm_port: 8080
tcm_etcd_host: etcd
tcm_etcd_tls_skip_verify: true
tcm_etcd_tls_skip_san_verify: true
tcm_etcd_tls_enabled: true
tcm_etcd_tls_ca_file: "/certs/rootCA.crt"
tcm_etcd_tls_key_file: "/certs/client.key"
tcm_etcd_tls_cert_file: "/certs/client.crt"
vars:
ansible_host: '{{ tarantool_ansible_host }}'
ansible_user: '{{ super_user }}'
ansible_port: 2201
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
tarantool_etcd_schema_definition: "https"
tcm_feature_ttgraph: true
tcm_feature_column_store: true
tcm_feature_tqe: true
tcm_feature_api_token: true
tcm_bootstrap_password: "tarantool"
tcm_bootstrap_api_token: "tarantool"
tcm_initial_clusters:
- name: Tarantool Column Store
id: 00000000-0000-0000-0000-000000000000
storage-connection:
provider: etcd
etcd-connection:
endpoints:
- "{{ tarantool_etcd_schema_definition }}://etcd:2379"
username: ""
password: ""
prefix: "/tarantool/tcs"
tarantool-connection:
username: "client"
password: "secret"
Configuration with ETCD (basic auth enabled)¶
---
cluster-manager:
hosts:
tcm:
tcm_config_etcd_endpoints:
- "{{ tarantool_etcd_schema_definition }}://{{ tarantool_etcd_host }}:2379"
tcm_host: 0.0.0.0
tcm_port: 8080
tcm_etcd_host: etcd
tcm_etcd_username: "root"
tcm_etcd_password: "mysecurepassword"
vars:
ansible_host: '{{ tarantool_ansible_host }}'
ansible_user: '{{ super_user }}'
ansible_port: 2201
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
tarantool_etcd_schema_definition: "http"
tcm_feature_ttgraph: true
tcm_feature_column_store: true
tcm_feature_tqe: true
tcm_feature_api_token: true
tcm_bootstrap_password: "tarantool"
tcm_bootstrap_api_token: "tarantool"
tcm_initial_clusters:
- name: Tarantool Column Store
id: 00000000-0000-0000-0000-000000000000
storage-connection:
provider: etcd
etcd-connection:
endpoints:
- "{{ tarantool_etcd_schema_definition }}://etcd:2379"
username: "root"
password: "mysecurepassword"
prefix: "/tarantool/tcs"
tarantool-connection:
username: "client"
password: "secret"
Configuration with Tarantool Config Storage¶
cluster-manager:
hosts:
tcm:
tcm_host: 0.0.0.0
tcm_port: 8080
tcm_etcd_host: "{{ tarantool_etcd_host }}"
vars:
ansible_host: '{{ tarantool_ansible_host }}'
ansible_user: '{{ super_user }}'
ansible_port: 2201
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
tcm_feature_ttgraph: true
tcm_feature_column_store: true
tcm_feature_api_token: true
tcm_bootstrap_password: "tarantool"
tcm_bootstrap_api_token: "tarantool"
tcm_storage_provider: tarantool
tcm_config_etcd_endpoints:
- "{{ tarantool_etcd_host }}:4401"
tcm_etcd_username: "config-storage"
tcm_etcd_password: "123456"
tcm_initial_clusters:
- name: Tarantool DB
id: 00000000-0000-0000-0000-000000000000
storage-connection:
provider: tarantool
tarantool-connection:
endpoints:
- "{{ tarantool_etcd_host }}:4401"
username: "config-storage"
password: "123456"
prefix: "/tarantool/tarantooldb"
tarantool-connection:
username: "client"
password: "secret"
Tarantool Clusters Federation¶
Information on configuring TCF and the scenarios for working with this product are given in the Tarantool Clusters Federation section.
Static inventory¶
tcf:
vars:
tcf_user: replicator
tcf_user_password: password
tcf_dial_timeout: 5s
children:
vm-1:
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
hosts:
tcf-host-1:
tcf-host-3:
vm-2:
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
hosts:
tcf-host-2:
tcf-host-4:
destination:
vars:
tcf_destination_metrics_enabled: true
hosts:
tcf-host-1:
tcf-host-2:
gateway:
vars:
tcf_gateway_metrics_enabled: true
hosts:
tcf-host-3:
tcf-host-4:
hosts:
tcf-host-1:
tcf_gateway_host: localhost
tcf_gateway_port: 9099
tcf_destination_port_metrics: 9100
tcf_destination_router_uris:
- localhost:3403
- localhost:3404
tcf_destination_host: localhost
tcf_gateways:
- host: 127.0.0.1
port: 10080
- host: 127.0.0.1
port: 11080
- host: 127.0.0.1
port: 12080
tcf_destination_storage: etcd_v2
tcf_destination_storage_endpoints:
- host: "{{ tarantool_etcd_host }}"
port: 2379
tcf_destination_storage_prefix: /destination1
tcf_destination_storage_ttl: 10
tcf-host-2:
tcf_gateway_host: localhost
tcf_gateway_port: 9199
tcf_destination_port_metrics: 9101
tcf_destination_router_uris:
- localhost:3303
- localhost:3304
tcf_destination_host: localhost
tcf_gateways:
- host: 127.0.0.1
port: 10080
- host: 127.0.0.1
port: 11080
- host: 127.0.0.1
port: 12080
tcf_destination_storage: etcd_v2
tcf_destination_storage_endpoints:
- host: "{{ tarantool_etcd_host }}"
port: 2379
tcf_destination_storage_prefix: /destination2
tcf_destination_storage_ttl: 10
tcf-host-3:
tcf_gateway_port: 9299
tcf_destination_port_metrics: 9102
tcf_gateway_storage_uris:
- localhost:3401
- localhost:3402
- localhost:3410
- localhost:3411
tcf_gateway_host: localhost
tcf-host-4:
tcf_gateway_port: 9399
tcf_destination_port_metrics: 9103
tcf_gateway_storage_uris:
- localhost:3301
- localhost:3302
- localhost:3310
- localhost:3311
tcf_gateway_host: localhost
Dynamic inventory¶
The topology of the data replicators between clusters (Gateway and Destination) is built automatically from the cluster descriptions in the inventory. Specifying it by hand is optional: the automatic setup covers typical cases, and the topology can still be defined manually when needed.
Components are configured through variables that have default values. You can omit them when the defaults are suitable, or override them in the configuration when necessary. The TCF configuration variables are described in detail in the corresponding section.
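For instance, to change the connection timeout for all components at once, it should be enough to override the corresponding variable in the shared tcf section (a minimal sketch; tcf_dial_timeout is one of the variables used in the full example below):
tcf:
  # assumed override: allow up to 10s when establishing connections
  tcf_dial_timeout: 10s
The full generator configuration below shows these variables in context.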
plugin: tarantool.enterprise.generator
product: TCF
tcf:
tcf_user: replicator
tcf_user_password: password
tarantool_collected_logs_local_path: /tmp/logs
tcf_log_directory: /app/logs/tcf
tcf_dial_timeout: 5s
tcf_gateways:
- host: 127.0.0.1
port: 10080
- host: 127.0.0.1
port: 11080
- host: 127.0.0.1
port: 12080
tcf_destination_storage: etcd_v2
tcf_destination_storage_endpoints:
- host: "{{ tarantool_etcd_host }}"
port: 2379
tcf_destination_storage_prefix: /destination1
tcf_destination_storage_ttl: 10
cluster_1:
product: cartridge
cluster_name: tnt-cluster-1
constants:
tarantool_collected_logs_local_path: /tmp/logs
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: app1
cartridge_keep_num_latest_dists: 10
cartridge_cluster_cookie: app1-molecule-testing
cartridge_failover_params:
etcd2_params:
endpoints:
- http://{{ tarantool_etcd_host }}:2379
lock_delay: 10
prefix: /app1
failover_timeout: 20
fencing_enabled: true
fencing_pause: 2
fencing_timeout: 10
mode: stateful
state_provider: etcd2
servers:
- name: 'vm_1'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
zone: 'DC2'
port_starts:
iproto: 3333
http: 7777
- name: 'vm_2'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
zone: 'DC5'
user: '{{ super_user }}'
port_starts:
iproto: 3444
http: 8888
components:
- name: storage
replicasets: 2
replicas: 2
config:
vinyl_cache: 0
vinyl_memory: 0
memtx_memory: 134217728
roles:
- vshard-storage
- metrics
- tcf-worker
- name: router
replicasets: 2
replicas: 1
config:
vinyl_cache: 0
vinyl_memory: 0
roles:
- vshard-router
- failover-coordinator
- metrics
- tcf-worker
- tcf-coordinator
# changes:
# - type: set_variables
# hosts:
# - storage
# values:
# example: var_2
cluster_2:
cluster_name: tnt-cluster-2
product: cartridge
constants:
tarantool_collected_logs_local_path: /tmp/logs
cartridge_app_name: app-two
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_keep_num_latest_dists: 10
cartridge_cluster_cookie: app2-molecule-testing
cartridge_failover_params:
etcd2_params:
endpoints:
- http://{{ tarantool_etcd_host }}:2379
lock_delay: 10
prefix: /app2
failover_timeout: 20
fencing_enabled: true
fencing_pause: 2
fencing_timeout: 10
mode: stateful
state_provider: etcd2
servers:
- name: 'vm_3'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
zone: 'DC2'
port_starts:
iproto: 3555
http: 6666
- name: 'vm_4'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
zone: 'DC5'
user: '{{ super_user }}'
port_starts:
iproto: 3666
http: 5555
components:
- name: storage-2
replicasets: 2
replicas: 2
config:
vinyl_cache: 0
vinyl_memory: 0
memtx_memory: 134217728
roles:
- vshard-storage
- metrics
- tcf-worker
- name: router-2
replicasets: 2
replicas: 1
config:
vinyl_cache: 0
vinyl_memory: 0
roles:
- vshard-router
- failover-coordinator
- metrics
- tcf-worker
- tcf-coordinator
# changes:
# - type: set_variables
# hosts:
# - storage-2
# values:
# example: test
Tarantool Column Store¶
Information on configuring TCS and the scenarios for working with this product are given in the Tarantool Column Store section.
Static inventory for TCS 0.x¶
---
tarantool:
children:
aggregators:
children:
storage-1:
coordinators:
vars:
tarantool_config_group:
roles: [app/aggregator_role, app/etcd_stateboard_client]
roles_cfg:
app/aggregator_role:
tcs:
default_column_values_limit: 20000
block_size: 8192
schema:
datafusion:
public:
attributes:
columns:
- data_type: i32
name: Аттрибут0
index_depth: 1000
indexed: true
column_values_limit: 10000
- data_type: i32
name: "Аттрибут1"
index_depth: 1000
- data_type: i32
name: Attribute2
index_depth: 1000
- data_type: i32
name: Attribute3
index_depth: 1000
- data_type: utf8
name: Attribute4
index_depth: 1000
storage-1:
hosts:
tcs-app-storage-01:
tcs-app-storage-02:
vars:
replicaset_alias: aggregators
tarantool_group_name: aggregators
tarantool_config_replicaset:
replication:
failover: supervised
coordinators:
hosts:
coordinator-1:
coordinator-2:
vars:
replicaset_alias: coordinators
vm_1:
hosts:
tcs-app-storage-01:
iproto:
listen:
- uri: 127.0.0.1:3311
advertise:
client: 127.0.0.1:3311
labels:
server: "{{ ansible_host }}"
roles_cfg:
tcs:
aggregator:
http_listen: 0.0.0.0:8777
tcs-app-storage-02:
iproto:
listen:
- uri: 127.0.0.1:3312
advertise:
client: 127.0.0.1:3312
labels:
server: "{{ ansible_host }}"
roles_cfg:
tcs:
aggregator:
http_listen: 0.0.0.0:8787
tcs-scheduler-01:
http_listen: '127.0.0.1:7778'
tcs_scheduler_metrics_enabled: true
tcs_scheduler_features: ["experimental_api"]
tcs-scheduler-02:
http_listen: '127.0.0.1:8778'
tcs_scheduler_metrics_enabled: true
tcs_scheduler_features: ["experimental_api"]
coordinator-1:
tarantool_coordinator: true
coordinator-2:
tarantool_coordinator: true
vars:
ansible_host: 127.0.0.1
ansible_user: "{{ super_user }}"
vars:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tcs
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
cartridge_systemd_dir: "{{ tarantool_systemd_userspace_dir }}"
cartridge_extra_env:
TCS_REPL_PASS: super-secret
tarantool_config_global:
fiber:
slice:
err: 15
credentials:
users:
replicator:
password: "{% raw %}{{ context.replicator_password }}{% endraw %}"
roles: [replication]
privileges:
- permissions: [execute]
functions: [failover.execute]
client:
password: 'secret'
roles: [super]
memtx:
memory: 114748364
config:
context:
replicator_password:
from: env
env: TCS_REPL_PASS
iproto:
advertise:
peer:
login: 'replicator'
Dynamic inventory for TCS 0.x¶
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: TCS
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tcs
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tcs_storage_group_name: aggregator
tcs_storage_replicaset_name: aggregator-r01
tarantool_wait_alive_delay: 2
tarantool_wait_alive_retries: 50
cartridge_extra_env:
TCS_REPL_PASS: super-secret
tcs_extra_env:
TOKIO_WORKER_THREADS: 1
tcs_scheduler_metrics_enabled: true
tcs_scheduler_features: ["experimental_api"]
tarantool_config_global:
fiber:
slice:
err: 15
credentials:
users:
replicator:
password: "{% raw %}{{ context.replicator_password }}{% endraw %}"
roles: [replication]
privileges:
- permissions: [execute]
functions: [failover.execute]
client:
password: 'secret'
roles: [super]
memtx:
memory: 114748364
config:
context:
replicator_password:
from: env
env: TCS_REPL_PASS
iproto:
advertise:
peer:
login: 'replicator'
compat:
box_error_serialize_verbose: "new"
failover:
call_timeout: 1
connect_timeout: 1
lease_interval: 10
probe_interval: 1
renew_interval: 10
stateboard:
keepalive_interval: 15
renew_interval: 3
servers:
- name: 'vm_1'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
zone: 'DC2'
port_starts:
iproto: 3444
http: 8091
components:
- name: scheduler
replicasets: 1
replicas: 2
- name: coordinator
replicasets: 1
replicas: 2
- name: aggregator
replicasets: 1
replicas: 2
port_starts:
http_streaming: 9081
config:
aggregator:
rv_update_ms: 100
replicaset:
tarantool_config_replicaset:
replication:
failover: supervised
group:
roles: [app/aggregator_role, app/etcd_stateboard_client]
roles_cfg:
app/aggregator_role:
tcs:
default_column_values_limit: 20000
block_size: 8192
schema:
datafusion:
public:
attributes:
columns:
- data_type: i32
name: Аттрибут0
index_depth: 1000
indexed: true
column_values_limit: 10000
- data_type: i32
name: "Аттрибут1"
index_depth: 1000
- data_type: i32
name: Attribute2
index_depth: 1000
- data_type: i32
name: Attribute3
index_depth: 1000
- data_type: utf8
name: Attribute4
index_depth: 1000
Static inventory for TCS 1.x¶
---
tarantool:
children:
aggregator:
children:
storage-1:
coordinator:
vars:
tarantool_config_group:
roles: [app/aggregator_role, app/etcd_stateboard_client]
storage-1:
hosts:
tcs-app-storage-01:
tcs-app-storage-02:
vars:
replicaset_alias: aggregator
tarantool_group_name: aggregator
tarantool_config_replicaset:
replication:
failover: supervised
coordinator:
hosts:
coordinator-1:
coordinator-2:
vars:
replicaset_alias: coordinators
vm_1:
hosts:
tcs-app-storage-01:
iproto:
listen:
- uri: 127.0.0.1:3311
advertise:
client: 127.0.0.1:3311
labels:
server: "{{ ansible_host }}"
roles_cfg:
app/aggregator_role:
arrow_flight_sql:
credentials:
password: tcs
username: tcs
listen: 127.0.0.1:50051
http:
credentials:
password: tcs
username: tcs
listen: 127.0.0.1:8092
roles.httpd:
default:
listen: 8091
tcs:
aggregator:
rv_update_ms: 100
tcs-app-storage-02:
iproto:
listen:
- uri: 127.0.0.1:3312
advertise:
client: 127.0.0.1:3312
labels:
server: "{{ ansible_host }}"
roles_cfg:
app/aggregator_role:
arrow_flight_sql:
credentials:
password: tcs
username: tcs
listen: 127.0.0.1:50052
http:
credentials:
password: tcs
username: tcs
listen: 127.0.0.1:8094
roles.httpd:
default:
listen: 8093
tcs:
aggregator:
rv_update_ms: 100
coordinator-1:
tarantool_coordinator: true
coordinator-2:
tarantool_coordinator: true
vars:
ansible_host: "{{ tarantool_ansible_host }}"
ansible_port: 2201
ansible_user: "{{ super_user }}"
vars:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tcs
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tarantool_systemd_scope: user
cartridge_systemd_dir: "{{ tarantool_systemd_userspace_dir }}"
cartridge_extra_env:
TCS_REPL_PASS: super-secret
tarantool_config_global:
fiber:
slice:
err: 15
credentials:
users:
replicator:
password: "{% raw %}{{ context.replicator_password }}{% endraw %}"
roles: [replication]
privileges:
- permissions: [execute]
functions: [failover.execute]
client:
password: 'secret'
roles: [super]
memtx:
memory: 114748364
config:
context:
replicator_password:
from: env
env: TCS_REPL_PASS
iproto:
advertise:
peer:
login: 'replicator'
Dynamic inventory for TCS 1.x¶
Added in version 1.12.0.
---
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: TCS
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tcs
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tcs_v1_support: true
tcs_http_credentials:
username: tcs
password: tcs
tcs_sql_credentials:
username: tcs
password: tcs
tcs_storage_group_name: aggregator
tcs_storage_replicaset_name: aggregator-r01
tarantool_wait_alive_delay: 2
tarantool_wait_alive_retries: 50
cartridge_extra_env:
TCS_REPL_PASS: super-secret
tcs_extra_env:
TOKIO_WORKER_THREADS: 1
tarantool_config_global:
fiber:
slice:
err: 15
credentials:
users:
replicator:
password: "{% raw %}{{ context.replicator_password }}{% endraw %}"
roles: [replication]
privileges:
- permissions: [execute]
functions: [failover.execute]
client:
password: 'secret'
roles: [super]
memtx:
memory: 114748364
config:
context:
replicator_password:
from: env
env: TCS_REPL_PASS
iproto:
advertise:
peer:
login: 'replicator'
compat:
box_error_serialize_verbose: "new"
failover:
call_timeout: 1
connect_timeout: 1
lease_interval: 10
probe_interval: 1
renew_interval: 10
stateboard:
keepalive_interval: 15
renew_interval: 3
servers:
- name: 'vm_1'
host: '{{ tarantool_ansible_host }}'
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
zone: 'DC2'
port_starts:
iproto: 3444
http: 8091
components:
- name: coordinator
replicasets: 1
replicas: 2
- name: aggregator
replicasets: 1
replicas: 2
port_starts:
http_streaming: 9081
config:
aggregator:
rv_update_ms: 100
replicaset:
replication:
failover: supervised
group:
roles: [app/aggregator_role, app/etcd_stateboard_client]
roles_cfg:
app/aggregator_role:
tcs:
aggregator:
rv_update_ms: 10
Dynamic inventory for TCS 1.x with SSL enabled for ETCD¶
---
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: TCS
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tcs
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tcs_v1_support: true
tcs_sql_credentials:
username: tcs
password: tcs
tcs_http_credentials:
username: tcs
password: tcs
tcs_storage_group_name: aggregator
tcs_storage_replicaset_name: aggregator-r01
tarantool_wait_alive_delay: 2
tarantool_wait_alive_retries: 50
tarantool_etcd_schema_definition: "https"
tarantool_config_etcd_ssl_key_file: "/certs/client.key"
tarantool_config_etcd_ssl_cert_file: "/certs/client.crt"
tarantool_config_etcd_ssl_ca_file: "/certs/rootCA.crt"
cartridge_extra_env:
TCS_REPL_PASS: super-secret
tcs_extra_env:
TOKIO_WORKER_THREADS: 1
tarantool_config_global:
fiber:
slice:
err: 15
credentials:
users:
replicator:
password: "{% raw %}{{ context.replicator_password }}{% endraw %}"
roles: [replication]
privileges:
- permissions: [execute]
functions: [failover.execute]
client:
password: 'secret'
roles: [super]
memtx:
memory: 114748364
config:
context:
replicator_password:
from: env
env: TCS_REPL_PASS
iproto:
advertise:
peer:
login: 'replicator'
compat:
box_error_serialize_verbose: "new"
failover:
call_timeout: 1
connect_timeout: 1
lease_interval: 10
probe_interval: 1
renew_interval: 10
stateboard:
keepalive_interval: 15
renew_interval: 3
servers:
- name: 'vm_1'
host: '{{ tarantool_ansible_host }}'
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
zone: 'DC2'
port_starts:
iproto: 3444
http: 8091
components:
- name: coordinator
replicasets: 1
replicas: 2
- name: aggregator
replicasets: 1
replicas: 2
port_starts:
http_streaming: 9081
sql: 50021
config:
aggregator:
rv_update_ms: 100
replicaset:
replication:
failover: supervised
group:
roles: [app/aggregator_role, app/etcd_stateboard_client]
roles_cfg:
app/aggregator_role:
tcs:
rv_update_ms: 100
Tarantool DB¶
Information on configuring TDB and the scenarios for working with this product are given in the Tarantool DB section.
Static inventory for Tarantool DB 2.x¶
---
all:
children:
ROUTERS:
children:
router-1:
router-2:
vars:
tarantool_config_group:
app:
module: app.vshard_bootstrapper
sharding:
roles: [router]
roles:
- roles.crud-router
- roles.metrics-export
- dictionary.roles.router
roles_cfg:
roles.crud-router:
stats: true
stats_driver: metrics
stats_quantiles: true
stats_quantile_tolerated_error: 0.001
stats_quantile_age_buckets_count: 5
stats_quantile_max_age_time: 180
STORAGES:
children:
storage-1:
storage-2:
vars:
tarantool_config_group:
sharding:
roles: [storage]
roles:
- roles.crud-storage
- roles.expirationd
- roles.metrics-export
- dictionary.roles.storage
memtx:
memory: 536870912 #512MiB
roles_cfg:
roles.expirationd: []
storage-1:
hosts:
storage-1-1:
storage-1-2:
vars:
replicaset_alias: storage-1
tarantool_config_replicaset:
bootstrap_leader: storage-1-1
replication:
bootstrap_strategy: config
storage-2:
hosts:
storage-2-1:
storage-2-2:
vars:
replicaset_alias: storage-2
tarantool_config_replicaset:
bootstrap_leader: storage-2-1
replication:
bootstrap_strategy: config
router-1:
hosts:
router-1:
vars:
replicaset_alias: router-1
router-2:
hosts:
router-2:
vars:
replicaset_alias: router-2
tarantooldb_test_env_1:
hosts:
storage-1-1:
iproto:
listen:
- uri: 127.0.0.1:3401
advertise:
client: 127.0.0.1:3401
roles_cfg:
roles.metrics-export:
http:
- listen: 8181
endpoints:
- path: /metrics
format: prometheus
router-1:
iproto:
listen:
- uri: 127.0.0.1:3405
advertise:
client: 127.0.0.1:3405
roles_cfg:
roles.metrics-export:
http:
- listen: 8185
endpoints:
- path: /metrics
format: prometheus
vars:
ansible_host: 127.0.0.1
ansible_user: astra
tarantooldb_test_env_2:
hosts:
router-2:
iproto:
listen:
- uri: 127.0.0.1:3406
advertise:
client: 127.0.0.1:3406
roles_cfg:
roles.metrics-export:
http:
- listen: 8186
endpoints:
- path: /metrics
format: prometheus
storage-2-1:
iproto:
listen:
- uri: 127.0.0.1:3403
advertise:
client: 127.0.0.1:3403
roles_cfg:
roles.metrics-export:
http:
- listen: 8183
endpoints:
- path: /metrics
format: prometheus
vars:
ansible_host: 127.0.0.1
ansible_user: astra
tarantooldb_test_env_3:
hosts:
storage-1-2:
iproto:
listen:
- uri: 127.0.0.1:3402
advertise:
client: 127.0.0.1:3402
roles_cfg:
roles.metrics-export:
http:
- listen: 8182
endpoints:
- path: /metrics
format: prometheus
storage-2-2:
iproto:
listen:
- uri: 127.0.0.1:3404
advertise:
client: 127.0.0.1:3404
roles_cfg:
roles.metrics-export:
http:
- listen: 8184
endpoints:
- path: /metrics
format: prometheus
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
vars:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
ansible_ssh_retries: 5
cartridge_app_name: tarantooldb
tarantool_3_0_version_support: true
tarantool_etcd_host: 127.0.0.1
etcd_host: 127.0.0.1
#etcd_cluster_prefix: /cluster
cartridge_failover_params:
prefix: /cluster
state_provider: "etcd2"
etcd2_params:
endpoints:
- "http://127.0.0.1:2379"
tarantool_config_global:
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
credentials:
users:
replicator:
password: 'replicator_password'
roles:
- super
storage:
password: 'storage_password'
roles:
- sharding
admin:
password: 'secret-cluster-cookie'
roles:
- super
client:
password: 'secret'
roles:
- super
admin-tcm:
password: 'you-know-who'
roles: ['super']
Dynamic inventory for Tarantool DB 2.x¶
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: tarantool
# distribution_strategy: StrategyEvenByZone
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tarantooldb
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tarantool_tt_connect_username: client
tarantool_tt_connect_password: secret
tarantool_config_global:
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
credentials:
users:
replicator:
password: 'replicator_password'
roles:
- super
storage:
password: 'storage_password'
roles:
- sharding
admin:
password: 'secret-cluster-cookie'
roles:
- super
client:
password: 'secret'
roles:
- super
admin-tcm:
password: 'you-know-who'
roles: ['super']
servers:
- name: 'vm_1'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
- name: 'vm_2'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
zone: 'DC5'
user: '{{ super_user }}'
port_starts:
iproto: 3401
http: 9091
# - name: 'vm_3'
# host: 127.0.0.1
# advertise_host: '127.0.0.1'
# port: 2201
# zone: 'DC5'
# user: '{{ super_user }}'
# port_starts:
# iproto: 3444
# http: 9092
components:
- name: storage
replicasets: 2
replicas: 2
config:
replicaset:
memtx:
memory: 512000000
group:
sharding:
roles: [storage]
roles:
- roles.crud-storage
- roles.expirationd
- roles.metrics-export
- dictionary.roles.storage
memtx:
memory: 536870912 #512MiB
roles_cfg:
roles.expirationd: []
- name: router
replicasets: 2
replicas: 1
config:
group:
app:
module: app.vshard_bootstrapper
sharding:
roles: [router]
roles:
- roles.crud-router
- roles.metrics-export
- dictionary.roles.router
roles_cfg:
roles.crud-router:
stats: true
stats_driver: metrics
stats_quantiles: true
stats_quantile_tolerated_error: 0.001
stats_quantile_age_buckets_count: 5
stats_quantile_max_age_time: 180
# changes:
# - type: set_variables
# hosts:
# - router
# - storage-r02-i02
# values:
# example: test
Dynamic inventory for Tarantool DB 2.x with SSL enabled for ETCD¶
---
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: tarantool
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tarantooldb
tarantool_etcd_host: "{{ tarantool_etcd_host }}"
tarantool_tt_connect_username: client
tarantool_tt_connect_password: secret
tarantool_3_0_version_support: true
tarantool_etcd_schema_definition: "https"
tarantool_config_etcd_ssl_key_file: "/certs/client.key"
tarantool_config_etcd_ssl_cert_file: "/certs/client.crt"
tarantool_config_etcd_ssl_ca_file: "/certs/rootCA.crt"
tarantool_config_global:
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
credentials:
users:
replicator:
password: 'replicator_password'
roles:
- super
storage:
password: 'storage_password'
roles:
- sharding
admin:
password: 'secret-cluster-cookie'
roles:
- super
client:
password: 'secret'
roles:
- super
admin-tcm:
password: 'you-know-who'
roles: ['super']
servers:
- name: 'vm_1'
host: '{{ tarantool_ansible_host }}'
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
- name: 'vm_2'
host: '{{ tarantool_ansible_host }}'
advertise_host: '127.0.0.1'
port: 2201
zone: 'DC5'
user: '{{ super_user }}'
port_starts:
iproto: 3401
http: 9091
components:
- name: storage
replicasets: 2
replicas: 2
config:
replicaset:
memtx:
memory: 512000000
group:
sharding:
roles: [storage]
roles:
- roles.crud-storage
- roles.expirationd
- roles.metrics-export
- dictionary.roles.storage
replication:
failover: supervised
memtx:
memory: 536870912 #512MiB
roles_cfg:
roles.expirationd: []
- name: router
replicasets: 2
replicas: 1
config:
group:
app:
module: app.vshard_bootstrapper
sharding:
roles: [router]
roles:
- roles.crud-router
- roles.metrics-export
- dictionary.roles.router
roles_cfg:
roles.crud-router:
stats: true
stats_driver: metrics
stats_quantiles: true
stats_quantile_tolerated_error: 0.001
stats_quantile_age_buckets_count: 5
stats_quantile_max_age_time: 180
- name: coordinator
replicasets: 1
replicas: 2
Dynamic inventory for Tarantool DB 2.x with the Tarantool Config Storage centralized storage¶
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: tarantool
# distribution_strategy: StrategyEvenByZone
constants:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_name: tarantooldb
tarantool_tt_connect_username: client
tarantool_tt_connect_password: secret
tarantool_config_storage_endpoints:
- uri: "http://{{ tarantool_etcd_host }}:4401"
login: "config-storage"
password: "123456"
tarantool_config_storage_prefix: "/tarantool/tarantooldb"
tarantool_config_storage: tarantool
tarantool_config_global:
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
credentials:
users:
replicator:
password: 'replicator_password'
roles:
- super
storage:
password: 'storage_password'
roles:
- sharding
admin:
password: 'secret-cluster-cookie'
roles:
- super
client:
password: 'secret'
roles:
- super
admin-tcm:
password: 'you-know-who'
roles: ['super']
servers:
- name: 'vm_2'
host: '{{ tarantool_ansible_host }}'
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
port_starts:
iproto: 3301
http: 9091
- name: 'vm_3'
host: '{{ tarantool_ansible_host }}'
advertise_host: '127.0.0.1'
port: 2201
zone: 'DC5'
user: '{{ super_user }}'
port_starts:
iproto: 3401
http: 9091
components:
- name: storage
replicasets: 2
replicas: 2
config:
replicaset:
memtx:
memory: 512000000
group:
sharding:
roles: [storage]
roles:
- roles.crud-storage
- roles.expirationd
- roles.metrics-export
- dictionary.roles.storage
memtx:
memory: 536870912 #512MiB
roles_cfg:
roles.expirationd: []
- name: router
replicasets: 2
replicas: 1
config:
group:
app:
module: app.vshard_bootstrapper
sharding:
roles: [router]
roles:
- roles.crud-router
- roles.metrics-export
- dictionary.roles.router
roles_cfg:
roles.crud-router:
stats: true
stats_driver: metrics
stats_quantiles: true
stats_quantile_tolerated_error: 0.001
stats_quantile_age_buckets_count: 5
stats_quantile_max_age_time: 180
Tarantool DB (RESP)¶
Static inventory¶
tdb-redis-cluster-1:
children:
STORAGES:
hosts:
tdb-redis-r01-s01:
tdb-redis-r01-s02:
replicaset_tdb_storage_01:
hosts:
tdb-redis-r01-s01:
tdb-redis-r01-s02:
vars:
failover_priority:
- tdb-redis-r01-s01
- tdb-redis-r01-s02
replicaset_alias: tdb-storage-01
roles:
- metrics
- failover-coordinator
- app.roles.tdb_redis
vm_1:
hosts:
tdb-redis-r01-s01:
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
vm_2:
hosts:
tdb-redis-r01-s02:
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
hosts:
tdb-redis-r01-s01:
config:
advertise_uri: localhost:3401
http_port: 8080
log: /app/logs/tdb-redis/tdb-redis-r01-s01.log
vinyl_cache: 0
vinyl_memory: 0
tdb_redis_port: 6379
tdb-redis-r01-s02:
config:
advertise_uri: localhost:3410
http_port: 8099
log: /app/logs/tdb-redis/tdb-redis-r01-s02.log
vinyl_cache: 0
vinyl_memory: 0
tdb_redis_port: 6380
vars:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_app_directory: /app/tarantool/tdb-redis
cartridge_app_name: tdb-redis
cartridge_keep_num_latest_dists: 10
cartridge_cluster_cookie: molecule-testing
cartridge_failover_params:
etcd2_params:
endpoints:
- http://{{ tarantool_etcd_host }}:2379
lock_delay: 10
prefix: /tdb-redis-1
failover_timeout: 20
fencing_enabled: true
fencing_pause: 2
fencing_timeout: 10
mode: stateful
state_provider: etcd2
Tarantool Data Grid¶
Scenarios for working with TDG are given in the Tarantool Data Grid section.
Static inventory¶
---
tdg-cluster:
children:
CORE:
hosts:
tdg-core-01:
tdg-core-02:
CORE_GROUP_1:
hosts:
tdg-core-01:
CORE_GROUP_2:
hosts:
tdg-core-02:
RUNNERS:
hosts:
tdg-runner-01:
STORAGES:
hosts:
tdg-r01-s01:
tdg-r01-s02:
STORAGES_GROUP_1:
hosts:
tdg-r01-s01:
STORAGES_GROUP_2:
hosts:
tdg-r01-s02:
replicaset_lt_core:
hosts:
tdg-core-01:
tdg-core-02:
vars:
failover_priority:
- tdg-core-01
- tdg-core-02
replicaset_alias: lt-core
roles:
- core
replicaset_lt_runner_01:
hosts:
tdg-runner-01:
vars:
failover_priority:
- tdg-runner-01
replicaset_alias: lt-runner-01
roles:
- connector
- failover-coordinator
- runner
replicaset_lt_storage_01:
hosts:
tdg-r01-s01:
tdg-r01-s02:
vars:
failover_priority:
- tdg-r01-s01
- tdg-r01-s02
replicaset_alias: lt-storage-01
roles:
- storage
hosts:
tdg-core-01:
config:
advertise_uri: localhost:3301
http_port: 8081
log: /app/logs/tdg/tdg-core-01.log
vinyl_cache: 0
vinyl_memory: 0
tdg-core-02:
config:
advertise_uri: localhost:3302
http_port: 8082
log: /app/logs/tdg/tdg-core-02.log
vinyl_cache: 0
vinyl_memory: 0
tdg-r01-s01:
config:
advertise_uri: localhost:3303
http_port: 8083
log: /app/logs/tdg/tdg-r01-s01.log
tdg-r01-s02:
config:
advertise_uri: localhost:3304
http_port: 8084
log: /app/logs/tdg/tdg-r01-s02.log
tdg-runner-01:
config:
advertise_uri: localhost:3305
http_port: 8080
log: /app/logs/tdg/tdg-runner-01.log
vars:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null
-o StrictHostKeyChecking=no
cartridge_cluster_cookie: tdg-secret-cookie
cartridge_app_config:
ldap:
body:
- domain: test.mail.ru
hosts:
- localhost:389
options: []
roles:
- domain_groups:
- CN=test,OU=Groups,OU=Test,DC=test,DC=mail,DC=ru
role: admin
search_timeout: 2
use_active_directory: true
use_tls: false
metrics:
body:
export:
- format: prometheus
path: /metrics
- format: health
path: /health
global-labels:
name: tdg
cartridge_app_name: tdg
cartridge_defaults:
log_level: 5
cartridge_failover_params:
etcd2_params:
endpoints:
- http://{{ tarantool_etcd_host }}:2379
lock_delay: 10
prefix: /tdg-cluster
failover_timeout: 20
fencing_enabled: true
fencing_pause: 2
fencing_timeout: 10
mode: stateful
state_provider: etcd2
cartridge_keep_num_latest_dists: 10
cartridge_package_name: tdg
Tarantool Queue Enterprise¶
Scenarios for working with TQE are given in the Tarantool Queue Enterprise (MQ) section.
Static inventory¶
all:
children:
# Group: bus servers
bus:
vars:
# Configuration to be loaded into the cluster once it is ready
cartridge_app_config:
# Login and password for connecting to Tarantool
creds:
body:
user: user
pass: pass
# Queues that will be available on the bus
queues:
body:
- output_by_instruments
- output_by_users
- input
- dictionaries
# Metrics configuration: the URLs and formats used to scrape metrics
metrics:
body:
export:
- format: prometheus
path: /metrics
- format: health
path: /health
global-labels:
ris_name: my-app
ris_sub: prod
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
cartridge_app_name: my-app
cartridge_app_directory: "{{ cartridge_app_directory }}"
cartridge_app_install_dir: "{{ cartridge_app_directory }}/dist"
cartridge_app_instances_dir: "{{ cartridge_app_directory }}/bin"
cartridge_run_dir: "{{ cartridge_app_directory }}/run"
cartridge_data_dir: "{{ cartridge_app_directory }}/workdir"
cartridge_conf_dir: "{{ cartridge_app_directory }}/etc/conf.d"
cartridge_log_dir_parent: "{{ cartridge_log_dir_parent }}"
cartridge_memtx_dir_parent: "{{ cartridge_memtx_dir_parent }}"
cartridge_cluster_cookie: some-random-cookie-for-each-cluster
cartridge_app_group: tarantool
cartridge_app_user: tarantool
cartridge_install_tarantool_for_tgz: false
cartridge_keep_num_latest_dists: 10
cartridge_multiversion: true
cartridge_not_save_cookie_in_app_config: false
cartridge_bootstrap_vshard: true
cartridge_configure_systemd_unit_files: false
cartridge_configure_tmpfiles: false
cartridge_create_user_group_for_tgz: false
cartridge_defaults:
audit_filter: compatibility,audit,ddl,custom
audit_format: plain
audit_log: syslog:identity=tarantool,facility=user
log_format: json
log_level: 5
cartridge_failover_params:
mode: eventual
tarantool_configure_logrotate: true
hosts:
my-app-core-01:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
my-app-api-01:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
# Group: replica set configuration for Tarantool
replicaset-my-app-core:
hosts:
# Hosts that belong to this group
my-app-core-01:
vars:
# Master switchover priority for failover
# When the master fails, its role passes to the next host in the list
failover_priority:
- my-app-core-01
# Replica set name
replicaset_alias: my-app-core
# Roles to assign to the replica set
# Do not change
roles:
- app.roles.queue
- app.roles.api
hosts:
# Host: a Tarantool instance of the СЭТ bus
my-app-core-01:
config:
# HOST:PORT for iproto connections
advertise_uri: localhost:3305
# Port for HTTP traffic
http_port: 8085
# Log file configuration
log: /app/logs/my-app/my-app-core-01.log
# Host: an instance of the synchronous API and the subscription API of the СЭТ bus
my-app-api-01:
state: started
# Desired state of the service:
# started - the service is running
# stopped - the service is stopped
# restarted - the service is restarted
config:
# This parameter is required
advertise_uri: localhost:3305
app_name: BUS_API
app_version: develop
core_host: 0.0.0.0
# Port used for administrative functions,
# for example, scraping metrics
core_port: 8184
grpc_host: 0.0.0.0
# Port on which the service's gRPC API is available
grpc_port: 8182
tracing:
jaeger_collector_endpoint: "http://localhost:14268/api/traces"
# Tarantool connection configuration
publisher:
enabled: true
tarantool:
user: "{{ hostvars[groups['bus'][0]]['cartridge_app_config']['creds']['body']['user'] }}"
pass: "{{ hostvars[groups['bus'][0]]['cartridge_app_config']['creds']['body']['pass'] }}"
queues:
output_by_instruments:
connections:
storage:
- localhost:3305
output_by_users:
connections:
storage:
- localhost:3305
input:
connections:
routers:
- localhost:3305
dictionaries:
connections:
storage:
- localhost:3305
# Tarantool connection configuration
consumer:
enabled: true
polling_timeout: 500ms
tarantool:
user: user
pass: pass
queues:
output_by_instruments:
connections:
storage:
- localhost:3305
output_by_users:
connections:
storage:
- localhost:3305
input:
connections:
routers:
- localhost:3305
dictionaries:
connections:
storage:
- localhost:3305
Static inventory (version 3.0.0)¶
all:
children:
# Group: bus servers
bus:
vars:
# Configuration to be loaded into the cluster once it is ready
cartridge_app_config:
# Login and password for connecting to Tarantool
creds:
body:
user: user
pass: pass
# Queues that will be available on the bus
queues:
body:
- output_by_instruments
- output_by_users
- input
- dictionaries
# Metrics configuration (the format and URLs used to scrape metrics)
metrics:
body:
export:
- format: prometheus
path: /metrics
- format: health
path: /health
global-labels:
ris_name: my-app
ris_sub: prod
# SSH connection parameters
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
# Main application paths and directories
cartridge_app_name: my-app
cartridge_app_directory: "{{ cartridge_app_directory }}"
cartridge_app_install_dir: "{{ cartridge_app_directory }}/dist"
cartridge_app_instances_dir: "{{ cartridge_app_directory }}/bin"
cartridge_run_dir: "{{ cartridge_app_directory }}/run"
cartridge_data_dir: "{{ cartridge_app_directory }}/workdir"
cartridge_conf_dir: "{{ cartridge_app_directory }}/etc/conf.d"
cartridge_log_dir_parent: "{{ cartridge_log_dir_parent }}"
cartridge_memtx_dir_parent: "{{ cartridge_memtx_dir_parent }}"
cartridge_cluster_cookie: some-random-cookie-for-each-cluster
# User and group that Tarantool runs under
cartridge_app_group: tarantool
cartridge_app_user: tarantool
# Other installation parameters
cartridge_install_tarantool_for_tgz: false
cartridge_keep_num_latest_dists: 10
cartridge_multiversion: true
cartridge_not_save_cookie_in_app_config: false
cartridge_bootstrap_vshard: true
cartridge_configure_systemd_unit_files: false
cartridge_configure_tmpfiles: false
cartridge_create_user_group_for_tgz: false
# Defaults for the Tarantool configuration
cartridge_defaults:
audit_filter: compatibility,audit,ddl,custom
audit_format: plain
audit_log: syslog:identity=tarantool,facility=user
log_format: json
log_level: 5
# Failover settings
cartridge_failover_params:
mode: eventual
# logrotate configuration
tarantool_configure_logrotate: true
hosts:
my-app-core-01:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
my-app-api-01:
ansible_host: 127.0.0.1
ansible_user: '{{ super_user }}'
# Group: replica set configuration for Tarantool
replicaset-my-app-core:
hosts:
# Hosts that belong to this group
my-app-core-01:
vars:
# Master switchover priority for failover
failover_priority:
- my-app-core-01
# Replica set name
replicaset_alias: my-app-core
# Roles to assign to the replica set
roles:
- app.roles.queue
- app.roles.api
hosts:
# Host: a Tarantool instance of the СЭТ bus
my-app-core-01:
config:
# HOST:PORT for iproto connections
advertise_uri: localhost:3305
# Port for HTTP traffic (Tarantool HTTP API)
http_port: 8085
# Log file configuration
log: /app/logs/my-app/my-app-core-01.log
# Host: an API service instance (gRPC + REST)
my-app-api-01:
state: started
# Service state (started/stopped/restarted)
config:
# This parameter is required
advertise_uri: localhost:3305
# Application name and version
app_name: BUS_API
app_version: develop
# Administrative HTTP interface settings (metrics/health)
core_host: 0.0.0.0
core_port: 8184
# In the new version, grpc_listen is used instead of grpc_host/grpc_port
grpc_listen:
# IPv4 listener
- uri: "tcp://0.0.0.0:8182"
# IPv6 listener
- uri: "tcp://[::]:8182"
# gRPC server logs
log:
file: /app/logs/my-app/grpc_log.json
format: json
level: info
# gRPC options (reflection enabled for grpcurl list)
grpc_options:
reflection_enabled: true
# Producer connection configuration (formerly publisher)
producer:
enabled: true
tarantool:
user: user
pass: pass
# Base connections for all queues
connections:
routers:
- "127.0.0.1:3307"
- "127.0.0.1:3308"
queues:
# The queues are declared but can use the base connections
output_by_instruments: {}
output_by_users: {}
input: {}
dictionaries: {}
# Consumer connection configuration
consumer:
enabled: true
polling_timeout: 500ms
tarantool:
user: user
pass: pass
# Base connections (canonical alias storage-1)
connections:
storage-1:
- "127.0.0.1:3301"
- "127.0.0.1:3302"
queues:
# Connections can be overridden for a specific queue
output_by_instruments:
connections:
storage-1:
- "127.0.0.1:3301"
output_by_users: {}
input: {}
dictionaries: {}
Dynamic inventory (version 3.0.0)¶
# Dynamic inventory plugin
plugin: tarantool.enterprise.generator
# Cluster name and product
cluster_name: tarantool
product: TQE
# distribution_strategy: StrategyEvenByZone # (optional) distribution strategy across zones
# Constants: parameters shared by the whole cluster
constants:
# Where to collect logs locally on the Ansible host
tarantool_collected_logs_local_path: /tmp/logs
# SSH connection parameters
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
# Application name and system user/group
cartridge_app_name: my-app
cartridge_app_group: tarantool
cartridge_app_user: tarantool
# TLS connection to etcd and paths to the certificates
tarantool_etcd_schema_definition: "https"
tarantool_config_etcd_ssl_key_file: "/certs/client.key"
tarantool_config_etcd_ssl_cert_file: "/certs/client.crt"
tarantool_config_etcd_ssl_ca_file: "/certs/rootCA.crt"
# Global Tarantool configuration
tarantool_config_global:
# Roles and their parameters
roles_cfg:
app.roles.queue:
queues:
- name: queue
- name: output_by_instruments
- name: output_by_users
- name: input
- name: dictionaries
# Example for routers (uncomment if needed)
# app.roles.api:
# autobootstrap: true
# sharding:
# routing:
# core_storage-r01:
# buckets:
# - [501, 1000]
# core_storage-r02:
# buckets:
# - [1, 500]
# Users with their roles and passwords
credentials:
users:
user:
roles: [super]
password: pass
storage:
roles: [sharding]
password: storage
client:
password: 'secret'
roles: [super]
replicator:
password: 'topsecret'
roles: [replication]
# iproto authorization settings for the peer/sharding roles
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
password: storage
# Common sharding parameters
sharding:
bucket_count: 1000
rebalancer_mode: "off"
# Hosts the cluster is deployed on
servers:
- name: 'vm_1'
host: '{{ tarantool_ansible_host }}' # the address comes from an external variable
advertise_host: '127.0.0.1' # the address the instances will advertise
port: 2201 # SSH port
user: '{{ super_user }}' # SSH user
# Cluster components and their parameters
components:
# Storages (storage)
- name: core_storage
replicasets: 2 # number of replica sets
replicas: 3 # number of replicas in each set
config:
replicaset:
memtx:
memory: 512000000
roles:
- app.roles.queue
sharding:
roles: [storage]
# Routers (router)
- name: core_router
replicasets: 2
replicas: 1
config:
replicaset:
roles:
- app.roles.api
sharding:
roles: [router]
# gRPC service (new 3.0.0 schema)
- name: grpc_server
replicasets: 1
replicas: 1
config:
# Application name and version
app_name: BUS_API
app_version: develop
# gRPC listeners (instead of grpc_host/grpc_port)
grpc_listen:
- uri: "tcp://0.0.0.0:8182" # IPv4
- uri: "tcp://[::]:8182" # IPv6
# Administrative HTTP (metrics/health)
core_host: 0.0.0.0
core_port: 8089
# gRPC logging configuration
log:
file: /app/logs/my-app/grpc_log.json
format: json
level: info
# gRPC options (reflection enabled, convenient for grpcurl list)
grpc_options:
reflection_enabled: true
# Producer (formerly publisher): new schema
producer:
enabled: true
tarantool:
user: user
pass: pass
# Base connections for all queues
connections:
routers:
- "127.0.0.1:3307"
- "127.0.0.1:3308"
# Queue list (connections can be overridden per queue)
queues:
queue: {}
output_by_instruments: {}
output_by_users: {}
input: {}
dictionaries: {}
# Consumer (canonical alias storage-1)
consumer:
enabled: true
polling_timeout: 500ms
tarantool:
user: user
pass: pass
# Base connections for all queues
connections:
storage-1:
- "127.0.0.1:3301"
- "127.0.0.1:3302"
- "127.0.0.1:3303"
- "127.0.0.1:3304"
- "127.0.0.1:3305"
- "127.0.0.1:3306"
queues:
queue: {}
# Example of a targeted per-queue connections override
output_by_instruments:
connections:
storage-1:
- "127.0.0.1:3301"
output_by_users: {}
input: {}
dictionaries: {}
# Changes (patches) applied on top of the generated inventory
changes:
- type: set_variables
hosts:
- core_storage-r01-i02
values:
replication:
anon: true
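The changes list is applied on top of the inventory that the generator produces; generated instance names follow the <component>-r<NN>-i<NN> pattern, as in core_storage-r01-i02 above. A hypothetical second patch in the same style (the host name and the labels value are illustrative, not part of the original example):

changes:
  - type: set_variables
    hosts:
      - core_router-r01-i01
    values:
      labels:
        maintenance: "true"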
Static inventory for Tarantool 3.x (1 replica)¶
---
tarantool:
children:
bus:
children:
cores:
vars:
tarantool_config_group:
roles:
- app.roles.api
- app.roles.queue
sharding:
roles: [router, storage]
cores:
hosts:
tqe-app-core-01:
vars:
replicaset_alias: cores
tarantool_group_name: cores
tarantool_config_replicaset:
roles_cfg:
app.roles.queue:
queues:
- name: output_by_instruments
- name: output_by_users
- name: input
- name: dictionaries
- name: queue
app.roles.api:
autobootstrap: true
sharding:
routing:
cores:
buckets:
- [1, 1000]
tarantool_config_global:
credentials:
users:
user:
roles: [super]
password: pass
storage:
roles: [sharding]
password: storage
iproto:
advertise:
sharding:
login: storage
password: storage
sharding:
bucket_count: 1000
rebalancer_mode: "off"
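# a single replica set (cores) owns the whole range [1, 1000], so the rebalancer can stay off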
vm_1:
vars:
ansible_host: 127.0.0.1
ansible_user: "{{ super_user }}"
hosts:
tqe-app-core-01:
iproto:
listen:
- uri: 127.0.0.1:3305
advertise:
client: 127.0.0.1:3305
labels:
server: "{{ ansible_host }}"
my-app-api-01:
state: started
config:
advertise_uri: localhost:3305
app_name: BUS_API
app_version: develop
core_host: 0.0.0.0
# Connection port for administrative functions
# (e.g. scraping metrics)
core_port: 8184
grpc_host: 0.0.0.0
# Port the service's gRPC API is available on
grpc_port: 8182
tracing:
jaeger_collector_endpoint: "http://localhost:14268/api/traces"
# Tarantool connection configuration
publisher:
enabled: true
tarantool:
user: user
pass: pass
queues:
queue:
connections:
routers:
- localhost:3305
output_by_instruments:
connections:
storage:
- localhost:3305
output_by_users:
connections:
storage:
- localhost:3305
input:
connections:
routers:
- localhost:3305
dictionaries:
connections:
storage:
- localhost:3305
# Tarantool connection configuration
consumer:
enabled: true
polling_timeout: 500ms
tarantool:
user: user
pass: pass
queues:
queue:
connections:
storage:
- localhost:3305
output_by_instruments:
connections:
storage:
- localhost:3305
output_by_users:
connections:
storage:
- localhost:3305
input:
connections:
routers:
- localhost:3305
dictionaries:
connections:
storage:
- localhost:3305
vars:
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
cartridge_app_name: my-app
cartridge_app_group: tarantool
cartridge_app_user: tarantool
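Note that this static example keeps the older gRPC schema: grpc_host/grpc_port plus a publisher section. In the 3.0.0 schema shown earlier, the same endpoint would be expressed roughly as follows (a sketch for comparison, not a drop-in replacement):

grpc_listen:
  - uri: "tcp://0.0.0.0:8182"
producer:              # formerly publisher
  enabled: true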
Static inventory for Tarantool 3.x (multiple replicas/replica sets, bootstrap by the last router instance)¶
tarantool:
children:
ROUTERS:
children:
router-01:
router-02:
vars:
tarantool_config_group:
roles:
- app.roles.api
sharding:
roles: [router]
STORAGES:
children:
storage-01:
storage-02:
vars:
tarantool_config_group:
memtx:
memory: 512000000
roles:
- app.roles.queue
sharding:
roles: [storage]
router-01:
hosts:
router-r01-s01:
vars:
replicaset_alias: router-01
router-02:
hosts:
router-r02-s01:
vars:
replicaset_alias: router-02
roles_cfg:
app.roles.api:
autobootstrap: true
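# autobootstrap is enabled only on the last router replica set (router-02), per the scenario in the heading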
sharding:
routing:
storage-01:
buckets:
- [501, 1000]
storage-02:
buckets:
- [1, 500]
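# the ranges [1, 500] and [501, 1000] together cover all bucket_count = 1000 buckets exactly once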
storage-01:
hosts:
storage-r01-s01:
storage-r01-s02:
vars:
replicaset_alias: storage-01
storage-02:
hosts:
storage-r02-s01:
storage-r02-s02:
vars:
replicaset_alias: storage-02
vm_1:
vars:
ansible_host: "{{ tarantool_ansible_host }}"
ansible_port: 2201
ansible_user: "{{ super_user }}"
hosts:
router-r01-s01:
iproto:
listen:
- uri: 127.0.0.1:3305
advertise:
client: 127.0.0.1:3305
labels:
server: "{{ ansible_host }}"
router-r02-s01:
iproto:
listen:
- uri: 127.0.0.1:3306
advertise:
client: 127.0.0.1:3306
labels:
server: "{{ ansible_host }}"
storage-r01-s01:
iproto:
listen:
- uri: 127.0.0.1:3307
advertise:
client: 127.0.0.1:3307
labels:
server: "{{ ansible_host }}"
storage-r01-s02:
iproto:
listen:
- uri: 127.0.0.1:3308
advertise:
client: 127.0.0.1:3308
labels:
server: "{{ ansible_host }}"
storage-r02-s01:
iproto:
listen:
- uri: 127.0.0.1:3309
advertise:
client: 127.0.0.1:3309
labels:
server: "{{ ansible_host }}"
storage-r02-s02:
iproto:
listen:
- uri: 127.0.0.1:3304
advertise:
client: 127.0.0.1:3304
labels:
server: "{{ ansible_host }}"
grpc-01:
state: started
tarantool_grpc: true
config:
app_name: BUS_API
app_version: develop
core_host: 0.0.0.0
# Connection port for administrative functions
# (e.g. scraping metrics)
core_port: 8184
grpc_host: 0.0.0.0
# Port the service's gRPC API is available on
grpc_port: 8182
log:
file: /app/logs/my-app/grpc_logs.json
format: json
tracing:
jaeger_collector_endpoint: "http://localhost:14268/api/traces"
# Tarantool connection configuration
publisher:
enabled: true
tarantool:
user: user
pass: pass
queues:
queue:
connections:
routers:
- localhost:3305
- localhost:3306
output_by_instruments:
connections:
routers:
- localhost:3305
- localhost:3306
output_by_users:
connections:
routers:
- localhost:3305
- localhost:3306
input:
connections:
routers:
- localhost:3305
- localhost:3306
dictionaries:
connections:
routers:
- localhost:3305
- localhost:3306
# Tarantool connection configuration
consumer:
enabled: true
polling_timeout: 500ms
tarantool:
user: user
pass: pass
queues:
queue:
connections:
storage:
- localhost:3304
- localhost:3308
- localhost:3309
- localhost:3307
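# the four storage addresses correspond to the iproto listen URIs of storage-r01-s01 (3307), storage-r01-s02 (3308), storage-r02-s01 (3309), and storage-r02-s02 (3304)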
output_by_instruments:
connections:
storage:
- localhost:3304
- localhost:3308
- localhost:3309
- localhost:3307
output_by_users:
connections:
storage:
- localhost:3307
- localhost:3304
- localhost:3308
- localhost:3309
input:
connections:
storage:
- localhost:3307
- localhost:3304
- localhost:3308
- localhost:3309
dictionaries:
connections:
storage:
- localhost:3307
- localhost:3304
- localhost:3308
- localhost:3309
vars:
tarantool_collected_logs_local_path: /tmp/logs
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
cartridge_app_name: my-app
cartridge_app_group: tarantool
cartridge_app_user: tarantool
tarantool_config_global:
roles_cfg:
app.roles.queue:
queues:
- name: queue
- name: output_by_instruments
- name: output_by_users
- name: input
- name: dictionaries
credentials:
users:
user:
roles: [super]
password: pass
storage:
roles: [sharding]
password: storage
client:
password: 'secret'
roles: [super]
replicator:
password: 'topsecret'
roles: [replication]
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
password: storage
sharding:
bucket_count: 1000
rebalancer_mode: "off"
Dynamic inventory for Tarantool 3.x¶
plugin: tarantool.enterprise.generator
cluster_name: tarantool
product: TQE
# distribution_strategy: StrategyEvenByZone
constants:
tarantool_collected_logs_local_path: /tmp/logs
ansible_ssh_common_args: -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
cartridge_app_name: my-app
cartridge_app_group: tarantool
cartridge_app_user: tarantool
tarantool_config_global:
roles_cfg:
app.roles.queue:
queues:
- name: queue
app.roles.api:
autobootstrap: true
sharding:
routing:
core-r01:
buckets:
- [1, 1000]
credentials:
users:
user:
roles: [super]
password: pass
storage:
roles: [sharding]
password: storage
iproto:
advertise:
sharding:
login: storage
password: storage
sharding:
bucket_count: 1000
rebalancer_mode: "off"
servers:
- name: 'vm_1'
host: 127.0.0.1
advertise_host: '127.0.0.1'
port: 2201
user: '{{ super_user }}'
components:
- name: core
replicasets: 1
replicas: 1
config:
replicaset:
memtx:
memory: 512000000
roles:
- app.roles.api
- app.roles.queue
sharding:
roles: [router, storage]
- name: grpc_server
replicasets: 1
replicas: 1
config:
app_name: BUS_API
app_version: develop
log:
file: /app/logs/my-app/grpc_log.json
format: json
publisher:
enabled: true
tarantool:
user: user
pass: pass
queues:
queue: {}
output_by_instruments:
connections:
example:
- "127.0.0.1:3301"
output_by_users: {}
input: {}
dictionaries: {}
consumer:
enabled: true
polling_timeout: 500ms
tarantool:
user: user
pass: pass
queues:
queue: {}
output_by_instruments:
connections:
example:
- "127.0.0.1:3301"
output_by_users: {}
input: {}
dictionaries: {}
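For reference, the generator is expected to expand the components above into replica set groups and instance hosts; judging by core-r01 in the routing section and core_storage-r01-i02 in the earlier patch example, the names follow the <component>-r<NN> / <component>-r<NN>-i<NN> pattern. A sketch of the layout this file should yield (inferred, not actual generator output):

core-r01:
  hosts:
    core-r01-i01:
grpc_server-r01:
  hosts:
    grpc_server-r01-i01: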