Controller : adjutant, aodh, ceilometer, cloudkitty, cinder, glance, heat, horizon, keystone, magnum, manila, nova, neutron, octavia, placement, rabbitmq, senlin, zookeeper
Compute : nova compute, neutron agent (l3, metadata, openvswitch)
[Controller Node]
- Install the vim package
- install_package.sh
#!/bin/bash
# readarray is a bash builtin, so use bash rather than sh
readarray -t container_ids < <(lxc-ls -1)
echo "${container_ids[@]}"
apt_packages="vim"
install_package() {
    local container_id=$1
    local apt_package=$2
    echo "-------------------------------------------------------"
    echo "install $apt_package to $container_id"
    echo "-------------------------------------------------------"
    lxc-attach "$container_id" -- apt install -y "$apt_package"
}
for container_id in "${container_ids[@]}"; do
    install_package "$container_id" "$apt_packages"
done
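A possible way to run it from the LXC host and spot-check one container (the container name below is only an example):
bash install_package.sh
lxc-attach controller1_keystone_container -- which vim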
- Create log directories
- mkdir_log.sh
#!/bin/bash
# Read the target service name from stdin, then find its containers
read service
readarray -t container_ids < <(lxc-ls -1 | grep "$service")
echo "${container_ids[@]}"
create_log_directory() {
    echo "-------------------------------------------------------"
    echo "$1 log directory"
    echo "-------------------------------------------------------"
    lxc-attach "$1" -- bash -c "
    if [ ! -d /var/log/$service ]; then
        mkdir /var/log/$service
    fi
    "
    lxc-attach "$1" -- chmod 777 /var/log/$service/
    lxc-attach "$1" -- chown "$service:$service" /var/log/$service/
    lxc-attach "$1" -- ls -al /var/log/$service/
}
for container_id in "${container_ids[@]}"; do
    create_log_directory "$container_id"
done
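The script reads the service name from stdin, so it can be run interactively or piped; for example:
echo "keystone" | bash mkdir_log.sh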
- Create logrotate configs
- log_rotate.sh
#!/bin/bash
# Read the target service name from stdin, then find its containers
read service
readarray -t container_ids < <(lxc-ls -1 | grep "$service")
echo "${container_ids[@]}"
update_log_rotate_conf() {
    echo "-------------------------------------------------------"
    echo "$1 logrotate file"
    echo "-------------------------------------------------------"
    lxc-attach "$1" -- bash -c "echo '/var/log/$service/*.log
{
copytruncate
weekly
missingok
rotate 4
compress
dateext
maxage 30
notifempty
nocreate
sharedscripts
postrotate
systemctl restart rsyslog > /dev/null 2>&1 || true
endscript
}
' > /etc/logrotate.d/$service-log-rotate"
    lxc-attach "$1" -- cat /etc/logrotate.d/$service-log-rotate
}
for container_id in "${container_ids[@]}"; do
    update_log_rotate_conf "$container_id"
done
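logrotate's debug mode can verify a generated file inside a container without actually rotating anything (the container name is an example):
lxc-attach controller1_keystone_container -- logrotate -d /etc/logrotate.d/keystone-log-rotate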
- Create zookeeper logrotate config
- zookeeper_rotate.sh
#!/bin/bash
# Read the target service name from stdin, then find its containers
read service
readarray -t container_ids < <(lxc-ls -1 | grep "$service")
echo "${container_ids[@]}"
update_log_rotate_conf() {
    echo "-------------------------------------------------------"
    echo "$1 logrotate file"
    echo "-------------------------------------------------------"
    lxc-attach "$1" -- bash -c "echo '/var/log/$service/*.log
{
copytruncate
weekly
missingok
rotate 4
compress
dateext
maxage 30
notifempty
nocreate
sharedscripts
postrotate
systemctl restart rsyslog > /dev/null 2>&1 || true
endscript
}
' > /etc/logrotate.d/$service-log-rotate"
    lxc-attach "$1" -- cat /etc/logrotate.d/$service-log-rotate
}
for container_id in "${container_ids[@]}"; do
    update_log_rotate_conf "$container_id"
done
- Create neutron logrotate config
- neutron_rotate.sh
#!/bin/sh
neutron_log_rotate_conf() {
echo "-------------------------------------------------------"
echo "neutron logrotate file"
echo "-------------------------------------------------------"
echo 'neutron-dhcp-agent.log
neutron-l3-agent.log
neutron-metadata-agent.log
neutron-metering-agent.log
neutron-openvswitch-agent.log
privsep-helper.log
{
copytruncate
weekly
missingok
rotate 4
compress
dateext
maxage 30
notifempty
nocreate
sharedscripts
postrotate
systemctl restart rsyslog > /dev/null 2>&1 || true
endscript
}
' > /etc/logrotate.d/neutron-log-rotate
}
neutron_log_rotate_conf
cat /etc/logrotate.d/neutron-log-rotate
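The per-service commands below use two ansible inventories, lxc (container groups such as adj, aodh, ceilo, kitty, heat, horizon) and node (host groups con, com). A minimal sketch of what those inventory files might contain; every group member listed here is an assumption:
# lxc (assumed layout)
[aodh]
controller1_aodh_container
[kitty]
controller1_cloudkitty_container
# node (assumed layout)
[con]
controller1
[com]
compute1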
- Configure adjutant, aodh, ceilometer, cinder, cloudkitty, glance, heat logging
# adjutant
ansible adj -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --logto \/var\/log\/adjutant\/adjutant-api.log/' /etc/systemd/system/adjutant-api.service"
ansible adj -i lxc -m shell -b -a "systemctl daemon-reload"
ansible adj -i lxc -m shell -b -a "systemctl restart adjutant-*"
# aodh
ansible aodh -i lxc -m shell -b -a "sed -i 's/use_journal = True/use_journal = False/g' /etc/aodh/aodh.conf"
ansible aodh -i lxc -m shell -b -a "mkdir /var/log/aodh"
ansible aodh -i lxc -m shell -b -a "chown aodh:aodh /var/log/aodh"
ansible aodh -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --logto \/var\/log\/aodh\/aodh-api.log/' /etc/systemd/system/aodh-api.service"
ansible aodh -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/aodh\/aodh-notifier.log/' /etc/systemd/system/aodh-notifier.service"
ansible aodh -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/aodh\/aodh-evaluator.log/' /etc/systemd/system/aodh-evaluator.service"
ansible aodh -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/aodh\/aodh-listener.log/' /etc/systemd/system/aodh-listener.service"
ansible aodh -i lxc -m shell -b -a "systemctl daemon-reload"
ansible aodh -i lxc -m shell -b -a "systemctl restart aodh-*"
# ceilometer
ansible ceilo -i lxc -m shell -b -a "sed -i 's/use_journal = True/use_journal = False/g' /etc/ceilometer/ceilometer.conf"
ansible ceilo -i lxc -m shell -b -a "mkdir /var/log/ceilometer"
ansible ceilo -i lxc -m shell -b -a "chown ceilometer:ceilometer /var/log/ceilometer"
ansible com -i node -m shell -b -a "mkdir -p /var/log/openstack/ceilometer"
ansible com -i node -m shell -b -a "chown ceilometer:ceilometer /var/log/openstack/ceilometer"
ansible ceilo -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/ceilometer\/ceilometer-agent-notification.log/' /etc/systemd/system/ceilometer-agent-notification.service"
ansible ceilo -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/ceilometer\/ceilometer-polling-central.log/' /etc/systemd/system/ceilometer-polling.service"
ansible com -i node -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/openstack\/ceilometer\/ceilometer-polling-compute.log/' /etc/systemd/system/ceilometer-polling.service"
ansible ceilo -i lxc -m shell -b -a "systemctl daemon-reload"
ansible com -i node -m shell -b -a "systemctl daemon-reload"
ansible ceilo -i lxc -m shell -b -a "systemctl restart ceilometer-*"
ansible com -i node -m shell -b -a "systemctl restart ceilometer-*"
# cinder
ansible con -i node -m shell -b -a "sed -i 's/use_journal = True/use_journal = False/g' /etc/cinder/cinder.conf"
ansible con -i node -m shell -b -a "mkdir -p /var/log/openstack/cinder"
ansible con -i node -m shell -b -a "chown cinder:cinder /var/log/openstack/cinder"
ansible con -i node -m shell -b -a "sed -i '/^ExecStart/ s/$/ --logto \/var\/log\/openstack\/cinder\/cinder-api.log/' /etc/systemd/system/cinder-api.service"
ansible con -i node -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/openstack\/cinder\/cinder-scheduler.log/' /etc/systemd/system/cinder-scheduler.service"
ansible con -i node -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/openstack\/cinder\/cinder-volume.log/' /etc/systemd/system/cinder-volume.service"
ansible con -i node -m shell -b -a "systemctl daemon-reload"
ansible con -i node -m shell -b -a "systemctl restart cinder-*"
# cloudkitty
ansible kitty -i lxc -m shell -b -a "sed -i 's/use_journal = True/use_journal = False/g' /etc/cloudkitty/cloudkitty.conf"
ansible kitty -i lxc -m shell -b -a "mkdir /var/log/cloudkitty"
ansible kitty -i lxc -m shell -b -a "chown cloudkitty:cloudkitty /var/log/cloudkitty"
ansible kitty -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --logto \/var\/log\/cloudkitty\/cloudkitty-api.log/' /etc/systemd/system/cloudkitty-api.service"
ansible kitty -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/cloudkitty\/cloudkitty-processor.log/' /etc/systemd/system/cloudkitty-processor.service"
ansible kitty -i lxc -m shell -b -a "systemctl daemon-reload"
ansible kitty -i lxc -m shell -b -a "systemctl restart cloudkitty-*"
# glance
ansible con -i node -m shell -b -a "sed -i 's/use_journal = True/use_journal = False/g' /etc/glance/glance-api.conf"
ansible con -i node -m shell -b -a "mkdir /var/log/openstack/glance"
ansible con -i node -m shell -b -a "chown glance:glance /var/log/openstack/glance"
ansible con -i node -m shell -b -a "sed -i '/^ExecStart/ s/$/ --logto \/var\/log\/openstack\/glance\/glance-api.log/' /etc/systemd/system/glance-api.service"
ansible con -i node -m shell -b -a "systemctl daemon-reload"
ansible con -i node -m shell -b -a "systemctl restart glance-*"
# heat
ansible heat -i lxc -m shell -b -a "sed -i 's/use_journal = True/use_journal = False/g' /etc/heat/heat.conf"
ansible heat -i lxc -m shell -b -a "mkdir /var/log/heat"
ansible heat -i lxc -m shell -b -a "chown heat:heat /var/log/heat"
ansible heat -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --logto \/var\/log\/heat\/heat-api.log/' /etc/systemd/system/heat-api.service"
ansible heat -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --logto \/var\/log\/heat\/heat-api-cfn.log/' /etc/systemd/system/heat-api-cfn.service"
ansible heat -i lxc -m shell -b -a "sed -i '/^ExecStart/ s/$/ --log-file \/var\/log\/heat\/heat-engine.log/' /etc/systemd/system/heat-engine.service"
ansible heat -i lxc -m shell -b -a "systemctl daemon-reload"
ansible heat -i lxc -m shell -b -a "systemctl restart heat-*"
# horizon
ansible horizon -i lxc -m shell -b -a "sed -i 's/ErrorLog/#ErrorLog/g' /etc/apache2/sites-available/openstack-dashboard.conf"
ansible horizon -i lxc -m shell -b -a "systemctl restart apache2.service"
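Since these are in-place sed edits to unit files, it may be worth spot-checking that the ExecStart lines were actually modified and the services came back up, for example (reusing the same inventory and group names as above):
ansible aodh -i lxc -m shell -b -a "grep ExecStart /etc/systemd/system/aodh-*.service"
ansible aodh -i lxc -m shell -b -a "systemctl is-active aodh-api aodh-notifier aodh-evaluator aodh-listener"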
- Configure keystone logging
. mkdir_log.sh
keystone
. log_rotate.sh
keystone
vi /etc/keystone/keystone.conf
[DEFAULT]
use_journal = True
# Disable stderr logging
use_stderr = False
debug = True
fatal_deprecations = False
## Oslo.Messaging RPC
transport_url = rabbit://keystone:c9de703026876b9d5dfaf69dd@172.29.236.94:5672,keystone:c9de703026876b9d5dfaf69dd@172.29.236.128:5672,keystone:c9de703026876b9d5dfaf69dd@172.29.236.65:5672//keystone?ssl=0
# Added for logging
log_dir = /var/log/keystone
systemctl restart keystone-*
- Configure magnum logging
. mkdir_log.sh
magnum
. log_rotate.sh
magnum
vi /etc/magnum/magnum.conf
[DEFAULT]
# Disable stderr logging
use_stderr = False
debug = True
use_journal = True
host = 172.29.236.209
transport_url = rabbit://magnum:0d8aa7b43fc1ab98c95@172.29.236.94:5672,magnum:0d8aa7b43fc1ab98c95@172.29.236.128:5672,magnum:0d8aa7b43fc1ab98c95@172.29.236.65:5672//magnum?ssl=0
# Added for logging
log_dir = /var/log/magnum
systemctl restart magnum-*
- Configure manila logging
. mkdir_log.sh
manila
. log_rotate.sh
manila
vi /etc/manila/manila.conf
[DEFAULT]
use_journal = True
# Disable stderr logging
use_stderr = False
debug = True
fatal_deprecations = False
my_ip = 127.0.0.1
default_share_type = cephfsnfs1
share_name_template = share-%s
osapi_share_workers = 16
rootwrap_config = /etc/manila/rootwrap.conf
api_paste_config = /etc/manila/api-paste.ini
## RabbitMQ RPC
executor_thread_pool_size = 64
rpc_response_timeout = 60
transport_url = rabbit://manila:12096f3c372c73387e0bc10@172.29.236.94:5672,manila:12096f3c372c73387e0bc10@172.29.236.128:5672,manila:12096f3c372c73387e0bc10@172.29.236.65:5672//manila?ssl=0
## Quota
quota_shares = 50
quota_snapshots = 50
quota_gigabytes = 1000
quota_snapshot_gigabytes = 1000
quota_share_networks = 10
storage_availability_zone = nova
enabled_share_protocols = NFS,CEPHFS
enabled_share_backends = cephfsnfs1
# All given backend(s)
# Added for logging
log_dir = /var/log/manila
systemctl restart manila-*
- Configure nova logging
. mkdir_log.sh
nova
. log_rotate.sh
nova
vi /etc/nova/nova.conf
[DEFAULT]
# Logs / State
debug = True
use_journal = True
state_path = /var/lib/nova
service_down_time = 120
# Scheduler
cpu_allocation_ratio = 2.0
disk_allocation_ratio = 1.0
ram_allocation_ratio = 1.0
reserved_host_disk_mb = 2048
reserved_host_memory_mb = 2048
# Compute
compute_driver = libvirt.LibvirtDriver
instances_path = /var/lib/nova/instances
allow_resize_to_same_host = True
# Api's
enabled_apis = osapi_compute,metadata
# Rpc all
transport_url = rabbit://nova:ed04473108ff2371fbb9d68e257cfafc971ab9f2546e8a2@172.29.236.94:5672,nova:ed04473108ff2371fbb9d68e257cfafc971ab9f2546e8a2@172.29.236.128:5672,nova:ed04473108ff2371fbb9d68e257cfafc971ab9f2546e8a2@172.29.236.65:5672//nova?ssl=0
# Network
my_ip = 172.29.236.182
# Hypervisor
default_ephemeral_format = ext4
# Ceilometer notification configurations
instance_usage_audit = True
instance_usage_audit_period = hour
# Notifications
# Added for logging
log_dir = /var/log/nova
lxc-attach $nova_container
$nova_container# systemctl restart nova-*
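A loop like the following could restart the nova services in every nova container on the host (the grep pattern is an assumption about the container naming):
for c in $(lxc-ls -1 | grep nova); do
    lxc-attach "$c" -- bash -c "systemctl restart 'nova-*'"
done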
- Configure neutron logging
mkdir /var/log/openstack/neutron
chown neutron:neutron /var/log/openstack/neutron
vi /etc/neutron/neutron.conf
[DEFAULT]
# Disable stderr logging
use_stderr = False
debug = True
fatal_deprecations = False
use_journal = True
## Rpc all
executor_thread_pool_size = 64
rpc_response_timeout = 60
transport_url = rabbit://neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.94:5672,neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.128:5672,neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.65:5672//neutron?ssl=0
# Domain to use for building hostnames
dns_domain = openstacklocal
# Agent
# Added for logging
log_dir = /var/log/openstack/neutron
systemctl restart neutron-*
. mkdir_log.sh
neutron
. log_rotate.sh
neutron
lxc-attach $neutron_container
vi /etc/systemd/system/neutron-server.service
[Service]
Type = simple
User = neutron
Group = neutron
# Added for logging
ExecStart = /openstack/venvs/uwsgi-27.4.0-python3/bin/uwsgi --autoload --ini /etc/uwsgi/neutron-server.ini --logto /var/log/neutron/neutron-server.log --pyargv "--config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini"
ExecReload = /openstack/venvs/uwsgi-27.4.0-python3/bin/uwsgi --reload /run/neutron-server/uwsgi/neutron-server.pid
# Give a reasonable amount of time for the server to start up/shut down
TimeoutSec = 120
Restart = on-failure
RestartSec = 2
# This creates a specific slice which all services will operate from
# The accounting options give us the ability to see resource usage through
# the `systemd-cgtop` command.
Slice = uwsgi.slice
# Set Accounting
CPUAccounting = True
BlockIOAccounting = True
MemoryAccounting = True
TasksAccounting = True
# Set Sandboxing
PrivateTmp = False
PrivateDevices = False
PrivateNetwork = False
systemctl daemon-reload
systemctl restart neutron-server
- Configure octavia logging
. mkdir_log.sh
octavia
. log_rotate.sh
octavia
vi /etc/octavia/octavia.conf
[DEFAULT]
debug = True
use_journal = True
executor_thread_pool_size = 64
rpc_conn_pool_size = 30
transport_url = rabbit://octavia:e6fe37ef439e7fe05b12fea13ab03374a1c0a59760550@172.29.236.94:5672,octavia:e6fe37ef439e7fe05b12fea13ab03374a1c0a59760550@172.29.236.128:5672,octavia:e6fe37ef439e7fe05b12fea13ab03374a1c0a59760550@172.29.236.65:5672//octavia?ssl=0
# Added for logging
log_dir = /var/log/octavia
systemctl restart octavia-*
- Configure placement logging
. mkdir_log.sh
placement
. log_rotate.sh
placement
vi /etc/placement/placement.conf
[DEFAULT]
use_journal = True
# Added for logging
log_dir = /var/log/placement
systemctl restart placement-*
- Configure rabbitmq logging
vi /etc/rabbitmq/rabbitmq.conf
collect_statistics_interval = 5000
log.journald = true
# Modified for logging
log.file = rabbitmq.log
heartbeat = 640
ssl_handshake_timeout = 20000
handshake_timeout = 40000
systemctl restart rabbitmq-*
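If the rabbitmq-diagnostics CLI is present in the container, the active log location can be checked after the restart (a sketch, not verified against this deployment):
rabbitmq-diagnostics log_location
ls -al /var/log/rabbitmq/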
- Configure senlin logging
. mkdir_log.sh
senlin
. log_rotate.sh
senlin
vi /etc/senlin/senlin.conf
[DEFAULT]
# Disable stderr logging
use_stderr = False
debug = True
fatal_deprecations = False
use_journal = True
server_keystone_endpoint_type = public
## RPC Backend
transport_url = rabbit://senlin:078dfe824c5a8f851dfe4073795376ea1c6523ec40c8ae7470b3b4d4c5@172.29.236.94:5672,senlin:078dfe824c5a8f851dfe4073795376ea1c6523ec40c8ae7470b3b4d4c5@172.29.236.128:5672,senlin:078dfe824c5a8f851dfe4073795376ea1c6523ec40c8ae7470b3b4d4c5@172.29.236.65:5672//senlin?ssl=0
# Default region name used to get services endpoints.
region_name_for_services = RegionOne
## Tunable option
max_clusters_per_project = 100
max_nodes_per_cluster = 1000
periodic_interval = 60
periodic_interval_max = 120
periodic_fuzzy_delay = 10
health_check_interval_min = 60
check_interval_max = 3600
max_response_size = 524288
default_action_timeout = 3600
default_nova_timeout = 600
max_actions_per_batch = 0
batch_interval = 3
lock_retry_times = 3
lock_retry_interval = 10
database_retry_limit = 10
database_max_retry_interval = 2
engine_life_check_timeout = 2
service_down_time = 60
# Added for logging
log_dir = /var/log/senlin
systemctl restart senlin-*
- Configure zookeeper logging
. mkdir_log.sh
zookeeper
. zookeeper_rotate.sh
zookeeper
vi /etc/zookeeper/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# Place the dataLogDir to a separate physical disc for better performance
# Modified for logging
dataLogDir=/var/log/zookeeper
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/var/lib/zookeeper
# the port at which the clients will connect
clientPort=2181
systemctl restart zookeeper
- Create symlinks
- aodh, ceilometer, heat, keystone, nova, neutron, magnum, octavia, placement, zookeeper
- create-symlink.sh
#!/bin/bash
# Read the target service name from stdin, then find its containers
read service
readarray -t container_ids < <(lxc-ls -1 | grep "$service")
echo "${container_ids[@]}"
create_symlink() {
    echo "-------------------------------------------------------"
    echo "$1 symlink"
    echo "-------------------------------------------------------"
    mkdir -p /var/log/openstack/$service
    ln -s /openstack/log/$1/$service/* /var/log/openstack/$service
    ls -al /var/log/openstack/$service
}
for container_id in "${container_ids[@]}"; do
    create_symlink "$container_id"
done
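This assumes /openstack/log/<container> on the host is the bind mount of each container's /var/log (the openstack-ansible default layout); an example run and a dereferencing check:
echo "keystone" | bash create-symlink.sh
ls -lL /var/log/openstack/keystone/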
- Create horizon symlink
- horizon-symlink.sh
#!/bin/bash
service='horizon'
readarray -t container_ids < <(lxc-ls -1 | grep "$service")
echo "${container_ids[@]}"
create_symlink() {
    echo "-------------------------------------------------------"
    echo "$1 symlink"
    echo "-------------------------------------------------------"
    mkdir -p /var/log/openstack/$service
    ln -s /openstack/log/$1/apache2/* /var/log/openstack/$service
    ls -al /var/log/openstack/$service
}
for container_id in "${container_ids[@]}"; do
    create_symlink "$container_id"
done
- Create rabbitmq symlink
- rabbitmq-symlink.sh
#!/bin/bash
service='rabbit_mq'
readarray -t container_ids < <(lxc-ls -1 | grep "$service")
echo "${container_ids[@]}"
create_symlink() {
    echo "-------------------------------------------------------"
    echo "$1 symlink"
    echo "-------------------------------------------------------"
    mkdir -p /var/log/openstack/rabbitmq
    ln -s /openstack/log/$1/rabbitmq/* /var/log/openstack/rabbitmq
    ls -al /var/log/openstack/rabbitmq
}
for container_id in "${container_ids[@]}"; do
    create_symlink "$container_id"
done
- For services not covered by this method, there are cases where no log files exist under /openstack/log/$container/
- Need to check how to pull those logs out from inside the container
- Create extra symlinks
- adjutant, cloudkitty, manila, senlin
- create-extra-symlink.sh
#!/bin/bash
# Read the target service name from stdin, then find its containers
read service
readarray -t container_ids < <(lxc-ls -1 | grep "$service")
echo "${container_ids[@]}"
create_symlink() {
    echo "-------------------------------------------------------"
    echo "$1 symlink"
    echo "-------------------------------------------------------"
    mkdir -p /var/log/openstack/$service
    ln -s /var/lib/lxc/$1/rootfs/var/log/$service/* /var/log/openstack/$service
    ls -al /var/log/openstack/$service
}
for container_id in "${container_ids[@]}"; do
    create_symlink "$container_id"
done
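A possible way to drive it for the four services listed above:
for svc in adjutant cloudkitty manila senlin; do
    echo "$svc" | bash create-extra-symlink.sh
done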
[Compute Node]
- Configure Nova logging
mkdir /var/log/nova
chmod 777 /var/log/nova
chown nova:nova /var/log/nova
vi /etc/systemd/system/nova-compute.service
[Unit]
Description = nova-compute service
After = libvirtd.service
After = network.target
After = syslog.target
[Service]
Type = simple
User = nova
Group = nova
# Added for logging
ExecStart = /openstack/venvs/nova-27.4.0/bin/nova-compute --log-file=/var/log/nova/nova-compute.log
ExecReload = /bin/kill -HUP $MAINPID
# Give a reasonable amount of time for the server to start up/shut down
TimeoutSec = 120
Restart = on-failure
RestartSec = 2
# This creates a specific slice which all services will operate from
# The accounting options give us the ability to see resource usage through
# the `systemd-cgtop` command.
Slice = nova.slice
# Set Accounting
CPUAccounting = True
BlockIOAccounting = True
MemoryAccounting = True
TasksAccounting = True
# Set Sandboxing
PrivateTmp = False
PrivateDevices = False
PrivateNetwork = False
[Install]
WantedBy = multi-user.target
systemctl daemon-reload
systemctl restart nova-compute
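To confirm the edited unit is the one systemd actually loaded and the log file is being written, something like:
systemctl cat nova-compute | grep log-file
tail -n 20 /var/log/nova/nova-compute.log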
vi /etc/logrotate.d/nova_log_rotate
/var/log/nova/nova-compute.log
{
copytruncate
weekly
missingok
rotate 4
compress
dateext
maxage 30
notifempty
nocreate
sharedscripts
postrotate
systemctl restart rsyslog > /dev/null 2>&1 || true
endscript
}
- Configure Neutron logging
mkdir /var/log/neutron
chmod 777 /var/log/neutron
chown neutron:neutron /var/log/neutron
vi /etc/neutron/neutron.conf
[DEFAULT]
# Disable stderr logging
use_stderr = False
debug = True
fatal_deprecations = False
use_journal = True
## Rpc all
executor_thread_pool_size = 64
rpc_response_timeout = 60
transport_url = rabbit://neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.94:5672,neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.128:5672,neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.65:5672//neutron?ssl=0
# Domain to use for building hostnames
dns_domain = openstacklocal
# Agent
# Added for logging
log_file = /var/log/neutron/neutron.log
[agent]
polling_interval = 5
report_interval = 60
root_helper = sudo /openstack/venvs/neutron-27.4.0/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
root_helper_daemon = sudo /openstack/venvs/neutron-27.4.0/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
# Messaging
[oslo_messaging_rabbit]
rpc_conn_pool_size = 30
heartbeat_in_pthread = False
# Notifications
[oslo_messaging_notifications]
topics = notifications
driver = messagingv2
transport_url = rabbit://neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.94:5672,neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.128:5672,neutron:35cb353e6067a5f578a73a3f4ca92685d036bab52d9f@172.29.236.65:5672//neutron?ssl=0
# Concurrency (locking mechanisms)
[oslo_concurrency]
lock_path = /run/lock/neutron
vi /etc/systemd/system/neutron-metadata-agent.service
[Unit]
Description = neutron-metadata-agent service
After = network-online.target
After = syslog.target
[Service]
Type = simple
User = neutron
Group = neutron
# Added for logging
ExecStart = /openstack/venvs/neutron-27.4.0/bin/neutron-metadata-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini --log-file=/var/log/neutron/neutron-metadata-agent.log
ExecReload = /bin/kill -HUP $MAINPID
# Give a reasonable amount of time for the server to start up/shut down
TimeoutSec = 120
Restart = on-failure
RestartSec = 2
# This creates a specific slice which all services will operate from
# The accounting options give us the ability to see resource usage through
# the `systemd-cgtop` command.
Slice = neutron.slice
# Set Accounting
CPUAccounting = True
BlockIOAccounting = True
MemoryAccounting = True
TasksAccounting = True
# Set Sandboxing
PrivateTmp = False
PrivateDevices = False
PrivateNetwork = False
[Install]
WantedBy = multi-user.target
vi /etc/systemd/system/neutron-l3-agent.service
[Unit]
Description = neutron-l3-agent service
After = network-online.target
After = syslog.target
[Service]
Type = simple
User = neutron
Group = neutron
Environment = "PATH=/openstack/venvs/neutron-27.4.0/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Added for logging
ExecStart = /openstack/venvs/neutron-27.4.0/bin/neutron-l3-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini --log-file=/var/log/neutron/neutron-l3-agent.log
ExecReload = /bin/kill -HUP $MAINPID
# Give a reasonable amount of time for the server to start up/shut down
TimeoutSec = 120
Restart = on-failure
RestartSec = 2
# This creates a specific slice which all services will operate from
# The accounting options give us the ability to see resource usage through
# the `systemd-cgtop` command.
Slice = neutron.slice
# Set Accounting
CPUAccounting = True
BlockIOAccounting = True
MemoryAccounting = True
TasksAccounting = True
# Set Sandboxing
PrivateTmp = False
PrivateDevices = False
PrivateNetwork = False
KillMode = process
[Install]
WantedBy = multi-user.target
vi /etc/systemd/system/neutron-openvswitch-agent.service
[Unit]
Description = neutron-openvswitch-agent service
After = network-online.target
After = syslog.target
[Service]
Type = simple
User = neutron
Group = neutron
# Added for logging
ExecStart = /openstack/venvs/neutron-27.4.0/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --log-file=/var/log/neutron/neutron-openvswitch-agent.log
ExecReload = /bin/kill -HUP $MAINPID
# Give a reasonable amount of time for the server to start up/shut down
TimeoutSec = 120
Restart = on-failure
RestartSec = 2
# This creates a specific slice which all services will operate from
# The accounting options give us the ability to see resource usage through
# the `systemd-cgtop` command.
Slice = neutron.slice
# Set Accounting
CPUAccounting = True
BlockIOAccounting = True
MemoryAccounting = True
TasksAccounting = True
# Set Sandboxing
PrivateTmp = False
PrivateDevices = False
PrivateNetwork = False
[Install]
WantedBy = multi-user.target
systemctl daemon-reload
systemctl restart neutron-*
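A quick check that the three agents came back up and are writing to the new log files:
systemctl is-active neutron-metadata-agent neutron-l3-agent neutron-openvswitch-agent
ls -al /var/log/neutron/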
vi /etc/logrotate.d/neutron_log_rotate
/var/log/neutron/neutron-l3-agent.log
/var/log/neutron/neutron-metadata-agent.log
/var/log/neutron/neutron-openvswitch-agent.log
/var/log/neutron/neutron.log
{
copytruncate
weekly
missingok
rotate 4
compress
dateext
maxage 30
notifempty
nocreate
sharedscripts
postrotate
systemctl restart rsyslog > /dev/null 2>&1 || true
endscript
}