You are on page 1 of 43

Infrastructure setup

Planned setup
Below are the minimum environment requirements:

Server Role Disk(GB) RAM (GB) CPU *OS External Switch Internal Switch
ICO Server, Jazz and Cost 150 8 4 RHEL 7.5 x64 192.168.202.21 10.0.0.20
Openstack Controller 150 8 4 RHEL 7.5 x64 192.168.202.22 10.0.0.30
Openstack Network Node 100 8 4 RHEL 7.5 x64 192.168.202.23 10.0.0.40
Openstack Compute Node 100 16 8 RHEL 7.5 x64 192.168.202.24 10.0.0.50
Openstack Storage Node 100 4 4 RHEL 7.5 x64 192.168.202.25 10.0.0.60

Server Layout Partition


Disk space :

Server Role Space allocated in GB per directory


/ /home /opt /var /tmp /usr
ICO Server 20 25 60 5 20 -
Openstack Controllers 20 20 50 10 30 -
OpenStack Networknode 100 - - - - -
OpenStack ComputeNode 100 - - - - -
OpenStack StorageNode 100 - - - - -
Install the OS with a minimal installation and subscribe to Red Hat.
Pre-requisites for all servers:
- Edit /etc/hosts and insert IP, FQDN and shortname :
192.168.202.21 orkestrator.cloud.local orkestrator
192.168.202.22 controller.cloud.local controller
192.168.202.23 networknode.cloud.local networknode
192.168.202.24 computenode.cloud.local computenode
192.168.202.25 storagenode.cloud.local storagenode

- Selinux disabled
- Disable NetworkManager
- Disable Firewalld
Red Hat subscription registration
Ref = https://access.redhat.com/solutions/253273
# subscription-manager register
# subscription-manager list --available
# subscription-manager attach --auto
# subscription-manager repos --disable=*
# subscription-manager repos --enable=rhel-7-server-rpms
# subscription-manager repos --enable=rhel-7-server-optional-rpms
# subscription-manager repos --enable=rhel-7-server-extras-rpms
# subscription-manager repos --enable=rhel-7-server-openstack-13*
# yum install -y yum-plugin-priorities yum-utils
# yum-config-manager --setopt="rhel-7-server-openstack-13-rpms.priority=1" --enable rhel-7-server-openstack-13-rpms
Installation of OpenStack Queens
Pre-Requisite server Controller, Networknode, Compute & Storage Node
Install chrony
# yum -y install chrony
# vi /etc/chrony.conf
line 3: change servers for synchronization
server NTP_SERVER_CONTROLLER iburst
# line 25: add the network range you allow to receive requests
allow 10.0.0.0/24
# systemctl start chronyd
# systemctl enable chronyd

Install other dependencies:


# yum install net-tools lsof wget deltarpm
Install the OpenStack Queens repository
# yum -y install centos-release-openstack-queens epel-release
Update OS and Reboot
# yum update -y --enablerepo=centos-openstack-queens,epel
Disable and Stop NetworkManager
# systemctl stop NetworkManager
# systemctl disable NetworkManager
Disable and Stop firewalld
# systemctl stop firewalld
# systemctl disable firewalld
Disable SELinux
Reboot server
# init 6

Install Service

Services to install on the controller node:

- MariaDB      - RabbitMQ        - Gnocchi
- Memcached    - httpd           - Ceilometer-Central
- Keystone     - Glance          - Aodh-Evaluator
- Nova API     - Neutron Server
yum -y install centos-release-openstack-queens epel-release
yum --enablerepo=centos-openstack-queens -y install mariadb mariadb-server python2-PyMySQL
yum --enablerepo=centos-openstack-queens -y install rabbitmq-server memcached
yum --enablerepo=centos-openstack-queens,epel -y install openstack-keystone openstack-utils python-openstackclient httpd mod_wsgi
yum --enablerepo=centos-openstack-queens,epel -y install openstack-glance
yum --enablerepo=centos-openstack-queens,epel -y install openstack-nova
yum --enablerepo=centos-openstack-queens,epel -y install openstack-neutron openstack-neutron-ml2
yum --enablerepo=centos-openstack-queens,epel -y install openstack-dashboard
yum --enablerepo=centos-openstack-queens,epel -y install openstack-cinder
yum --enablerepo=centos-openstack-queens,epel -y install openstack-heat-common
yum --enablerepo=centos-openstack-queens,epel -y install openstack-barbican
yum --enablerepo=centos-openstack-queens,epel -y install openstack-gnocchi-api openstack-gnocchi-metricd python2-gnocchiclient
yum --enablerepo=centos-openstack-queens,epel -y install openstack-ceilometer-central openstack-ceilometer-notification python2-ceilometerclient
yum --enablerepo=centos-openstack-queens,epel -y install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine python-heatclient

Configure Services
MariaDB Configuration.
Create and edit the /etc/my.cnf.d/openstack.cnf file (backup existing configuration files in /etc/my.cnf.d/ if needed) and complete
the following actions:
Create a [mysqld] section, and set the bind-address key to the management IP address of the controller node to enable access by
other nodes via the management network. Set additional keys to enable useful options and the UTF-8 character set:

[mysqld]
bind-address = 192.168.202.22
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
# systemctl start mariadb.service
# systemctl enable mariadb.service
# mysql_secure_installation

MariaDB: create databases and grant user privileges:


#mysql -u root -p
create database keystone;
create database glance;
create database nova;
create database nova_api;
create database nova_placement;
create database nova_cell0;
create database neutron_ml2;
create database cinder;
create database manila;
create database heat;
create database barbican;
create database gnocchi;
create database rally;
create database aodh;
grant all privileges on aodh.* to aodh@'localhost' identified by 'password';
grant all privileges on aodh.* to aodh@'%' identified by 'password';
grant all privileges on rally.* to rally@'localhost' identified by 'password';
grant all privileges on rally.* to rally@'%' identified by 'password';
grant all privileges on gnocchi.* to gnocchi@'localhost' identified by 'password';
grant all privileges on gnocchi.* to gnocchi@'%' identified by 'password';
grant all privileges on barbican.* to barbican@'localhost' identified by 'password';
grant all privileges on barbican.* to barbican@'%' identified by 'password';
grant all privileges on heat.* to heat@'localhost' identified by 'password';
grant all privileges on heat.* to heat@'%' identified by 'password';
grant all privileges on manila.* to manila@'localhost' identified by 'password';
grant all privileges on manila.* to manila@'%' identified by 'password';
grant all privileges on keystone.* to keystone@'localhost' identified by 'password';
grant all privileges on keystone.* to keystone@'%' identified by 'password';
grant all privileges on glance.* to glance@'localhost' identified by 'password';
grant all privileges on glance.* to glance@'%' identified by 'password';
grant all privileges on nova.* to nova@'localhost' identified by 'password';
grant all privileges on nova.* to nova@'%' identified by 'password';
grant all privileges on nova_api.* to nova@'localhost' identified by 'password';
grant all privileges on nova_api.* to nova@'%' identified by 'password';
grant all privileges on nova_placement.* to nova@'localhost' identified by 'password';
grant all privileges on nova_placement.* to nova@'%' identified by 'password';
grant all privileges on nova_cell0.* to nova@'localhost' identified by 'password';
grant all privileges on nova_cell0.* to nova@'%' identified by 'password';
grant all privileges on neutron_ml2.* to neutron@'localhost' identified by 'password';
grant all privileges on neutron_ml2.* to neutron@'%' identified by 'password';
grant all privileges on cinder.* to cinder@'localhost' identified by 'password';
grant all privileges on cinder.* to cinder@'%' identified by 'password';
flush privileges;
exit

A simple script can be used to import all of the above databases in one step.

create_database.sql
Then run : mysql -u root -p < create_database.sql
Configure RabbitMQ, Memcached.
# systemctl start rabbitmq-server memcached
# systemctl enable rabbitmq-server memcached

- add openstack user (set any password you like)


# rabbitmqctl add_user openstack password
# rabbitmqctl set_permissions openstack ".*" ".*" ".*"

Configure Keystone
# vi /etc/keystone/keystone.conf
# line 606: uncomment and specify Memcache server
memcache_servers = controller:11211
# line 738: add ( MariaDB connection info )
connection = mysql+pymysql://keystone:password@controller/keystone
[token]
# line 2879: add
provider = fernet
# su -s /bin/bash keystone -c "keystone-manage db_sync"
# initialize keys
# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# define own host (controller host)
# echo 'export controller=192.168.202.22' >>~/.bash_profile
# source ~/.bash_profile
# echo $controller
# bootstrap keystone (replace any password you like for "adminpassword" section)
# keystone-manage bootstrap --bootstrap-password adminpassword \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne

# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
# systemctl start httpd
# systemctl enable httpd

Create and Load environment variables file.


The password for [OS_PASSWORD] is the one you set it on bootstrapping keystone.
The URL for [OS_AUTH_URL] is the Keystone server's hostname or IP address.
Create Service Project

# vi ~/keystonerc

export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=adminpassword
export OS_AUTH_URL=http://controller:5000/v3
export OS_REGION_NAME=RegionOne
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
export PS1='[\u@\h \W(keystone)]\$'

# chmod 600 ~/keystonerc


# source ~/keystonerc
~(keystone)]#echo "source ~/keystonerc " >> ~/.bash_profile
# openstack project create --domain default --description "Service Project" service
# openstack domain create democloud --description "Demo Private Cloud"
# openstack project list

# openstack user create --domain default --project service --password servicepassword glance
# openstack role add --project service --user glance admin
# openstack service create --name glance --description "OpenStack Image service" image
# openstack endpoint create --region RegionOne image public http://controller:9292
# openstack endpoint create --region RegionOne image internal http://controller:9292
# openstack endpoint create --region RegionOne image admin http://controller:9292

# openstack user create --domain default --project service --password servicepassword nova
# openstack role add --project service --user nova admin
# openstack user create --domain default --project service --password servicepassword placement
# openstack role add --project service --user placement admin
# openstack service create --name nova --description "OpenStack Compute service" compute
# openstack service create --name placement --description "OpenStack Compute Placement service" placement
# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
# openstack endpoint create --region RegionOne placement public http://controller:8778
# openstack endpoint create --region RegionOne placement internal http://controller:8778
# openstack endpoint create --region RegionOne placement admin http://controller:8778
# openstack user create --domain default --project service --password servicepassword neutron
# openstack role add --project service --user neutron admin
# openstack service create --name neutron --description "OpenStack Networking service" network
# openstack endpoint create --region RegionOne network public http://controller:9696
# openstack endpoint create --region RegionOne network internal http://controller:9696
# openstack endpoint create --region RegionOne network admin http://controller:9696

# openstack user create --domain default --project service --password servicepassword cinder
# openstack role add --project service --user cinder admin
# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
# openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
# openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(tenant_id\)s
# openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(tenant_id\)s
# openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(tenant_id\)s
# openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(tenant_id\)s
# openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(tenant_id\)s
# openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(tenant_id\)s

#openstack user create --domain default --project service --password servicepassword swift
#openstack role add --project service --user swift admin
#openstack service create --name swift --description "OpenStack Object Storage" object-store
# echo 'export swift_proxy=192.168.202.9' >>~/.bash_profile
# source ~/.bash_profile
# echo $swift_proxy
# openstack endpoint create --region RegionOne object-store public http://$swift_proxy:8080/v1/AUTH_%\(tenant_id\)s
# openstack endpoint create --region RegionOne object-store internal http://$swift_proxy:8080/v1/AUTH_%\(tenant_id\)s
# openstack endpoint create --region RegionOne object-store admin http://$swift_proxy:8080/v1
# openstack user create --domain default --project service --password servicepassword heat
# openstack role add --project service --user heat admin
# openstack role create heat_stack_owner
# openstack role create heat_stack_user
# openstack role add --project admin --user admin heat_stack_owner
# openstack service create --name heat --description "Openstack Orchestration" orchestration
# openstack service create --name heat-cfn --description "Openstack Orchestration" cloudformation
# echo 'export heat_api=192.168.202.23' >>~/.bash_profile
# source ~/.bash_profile
# echo $heat_api
# openstack endpoint create --region RegionOne orchestration public http://$heat_api:8004/v1/%\(tenant_id\)s
# openstack endpoint create --region RegionOne orchestration internal http://$heat_api:8004/v1/%\(tenant_id\)s
# openstack endpoint create --region RegionOne orchestration admin http://$heat_api:8004/v1/%\(tenant_id\)s
# openstack endpoint create --region RegionOne cloudformation public http://$heat_api:8000/v1
# openstack endpoint create --region RegionOne cloudformation internal http://$heat_api:8000/v1
# openstack endpoint create --region RegionOne cloudformation admin http://$heat_api:8000/v1
# openstack domain create --description "Stack projects and users" heat
# openstack user create --domain heat --password servicepassword heat_domain_admin
# openstack role add --domain heat --user heat_domain_admin admin

# openstack user create --domain default --project service --password servicepassword barbican
# openstack role add --project service --user barbican admin
# openstack service create --name barbican --description "OpenStack Key Manager" key-manager
# openstack endpoint create --region RegionOne key-manager public http://controller:9311
# openstack endpoint create --region RegionOne key-manager internal http://controller:9311
# openstack endpoint create --region RegionOne key-manager admin http://controller:9311

# openstack user create --domain default --project service --password servicepassword gnocchi
# openstack role add --project service --user gnocchi admin
# openstack service create --name gnocchi --description "Metric Service" metric
# openstack endpoint create --region RegionOne metric public http://controller:8041
# openstack endpoint create --region RegionOne metric internal http://controller:8041
# openstack endpoint create --region RegionOne metric admin http://controller:8041
# openstack user create --domain default --project service --password servicepassword ceilometer
# openstack role add --project service --user ceilometer admin
# openstack service create --name ceilometer --description "OpenStack Telemetry Service" metering

# openstack user create --domain default --project service --password servicepassword aodh
# openstack role add --project service --user aodh admin
# openstack service create --name aodh --description "Telemetry Alarming" alarming
# openstack endpoint create --region RegionOne alarming public http://controller:8042
# openstack endpoint create --region RegionOne alarming internal http://controller:8042
# openstack endpoint create --region RegionOne alarming admin http://controller:8042
Configure Glance :
# mv /etc/glance/glance-api.conf /etc/glance/glance-api.conf.org
# vi /etc/glance/glance-api.conf

[DEFAULT]
bind_host = 0.0.0.0

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

[database]
# MariaDB connection info
connection = mysql+pymysql://glance:password@controller/glance

# keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = servicepassword

[paste_deploy]
flavor = keystone

(keystone)]# mv /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.org


(keystone)]# vi /etc/glance/glance-registry.conf

[DEFAULT]
bind_host = 0.0.0.0

[database]
# MariaDB connection info
connection = mysql+pymysql://glance:password@controller/glance

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = servicepassword

[paste_deploy]
flavor = keystone

# chmod 640 /etc/glance/glance-api.conf /etc/glance/glance-registry.conf


# chown root:glance /etc/glance/glance-api.conf /etc/glance/glance-registry.conf
# su -s /bin/bash glance -c "glance-manage db_sync"
# systemctl start openstack-glance-api openstack-glance-registry
# systemctl enable openstack-glance-api openstack-glance-registry
Configure Nova-API & Nova Service
# mv /etc/nova/nova.conf /etc/nova/nova.conf.org
# vi /etc/nova/nova.conf

[DEFAULT]
# define own IP
my_ip = 192.168.202.22
state_path = /var/lib/nova
enabled_apis = osapi_compute,metadata
log_dir = /var/log/nova
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller

[api]
auth_strategy = keystone

# Glance connection info


[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = $state_path/tmp

# MariaDB connection info


[api_database]
connection = mysql+pymysql://nova:password@controller/nova_api

[database]
connection = mysql+pymysql://nova:password@controller/nova

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = servicepassword

[placement]
auth_url = http://controller:5000
os_region_name = RegionOne
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = placement
password = servicepassword

[placement_database]
connection = mysql+pymysql://nova:password@controller/nova_placement

[wsgi]
api_paste_config = /etc/nova/api-paste.ini

# chmod 640 /etc/nova/nova.conf


# chgrp nova /etc/nova/nova.conf
# vi /etc/httpd/conf.d/00-nova-placement-api.conf

# add near line 15

<Directory /usr/bin>
Require all granted
</Directory>

</VirtualHost>

Add Data into Database and start Nova services.

# su -s /bin/bash nova -c "nova-manage api_db sync"


# su -s /bin/bash nova -c "nova-manage cell_v2 map_cell0"
# su -s /bin/bash nova -c "nova-manage db sync"
# su -s /bin/bash nova -c "nova-manage cell_v2 create_cell --name cell1"
# systemctl restart httpd
# chown nova. /var/log/nova/nova-placement-api.log
# for service in api consoleauth conductor scheduler novncproxy; do
systemctl start openstack-nova-$service
systemctl enable openstack-nova-$service
done

Verify that the services are running:


#openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+------------+----------+---------+-------+----------------------------+
| 4 | nova-consoleauth | controller | internal | enabled | up | 2019-01-24T06:28:23.000000 |
| 5 | nova-conductor | controller | internal | enabled | up | 2019-01-24T06:28:19.000000 |
| 7 | nova-scheduler | controller | internal | enabled | up | 2019-01-24T06:28:24.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
(keystone)]# mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.org
(keystone)]# vi /etc/neutron/neutron.conf

[DEFAULT]
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
state_path = /var/lib/neutron
dhcp_agent_notification = True
allow_overlapping_ips = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = servicepassword

# MariaDB connection info


[database]
connection = mysql+pymysql://neutron:password@controller/neutron_ml2

# Nova connection info


[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = servicepassword

[oslo_concurrency]
lock_path = $state_path/tmp

(keystone)]# chmod 640 /etc/neutron/neutron.conf


(keystone)]# chgrp neutron /etc/neutron/neutron.conf

(keystone)]# vi /etc/neutron/metadata_agent.ini
# line 22: uncomment and specify Nova API server
nova_metadata_host = controller
# line 34: uncomment and specify any secret key you like
metadata_proxy_shared_secret = metadata_secret
# line 260: uncomment and specify Memcache server
memcache_servers = controller:11211

(keystone)]# vi /etc/neutron/plugins/ml2/ml2_conf.ini
# line 129: add ( it's OK with no value for "tenant_network_types" (set later if need) )
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types =
mechanism_drivers = openvswitch,l2population,linuxbridge
extension_drivers = port_security
(keystone)]# vi /etc/nova/nova.conf
# add follows into [DEFAULT] section
use_neutron = True
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
vif_plugging_is_fatal = True
vif_plugging_timeout = 300

# add follows to the end : Neutron auth info


# the value of metadata_proxy_shared_secret is the same with the one in metadata_agent.ini
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = servicepassword
service_metadata_proxy = True
metadata_proxy_shared_secret = metadata_secret

Start Neutron services

(keystone)]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini


(keystone)]# su -s /bin/bash neutron -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head"
(keystone)]# for service in server metadata-agent; do
systemctl start neutron-$service
systemctl enable neutron-$service
done
(keystone)]# systemctl restart openstack-nova-api
Configure Horizon Dashboard

(keystone)]# vi /etc/openstack-dashboard/local_settings
# line 38: add Dashboard Host

ALLOWED_HOSTS = ['controller', 'localhost']


# line 64: uncomment like follows

OPENSTACK_API_VERSIONS = {
# "data-processing": 1.1,
"identity": 3,
"volume": 2,
"compute": 2,
}

# line 75: uncomment and change

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
# line 97: uncomment

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# line 167,168: change and add Memcache server

CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
},
}

# line 189: change OPENSTACK_HOST to your own one


OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "member"

(keystone)]# vi /etc/httpd/conf.d/openstack-dashboard.conf
# near line 4: add
WSGIDaemonProcess dashboard
WSGIProcessGroup dashboard
WSGISocketPrefix run/wsgi
WSGIApplicationGroup %{GLOBAL}

(keystone)]# systemctl restart httpd


(keystone)]# openstack project create --domain default --description "PrivateCloud Project" PrivateCloud
(keystone)]# openstack user create --domain default --project PrivateCloud --password userpassword sysadmin
(keystone)]# openstack role create sysadmin
(keystone)]# openstack role add --project PrivateCloud --user sysadmin sysadmin
(keystone)]# openstack flavor create --id 0 --vcpus 1 --ram 1024 --disk 20 m1.small

Configure Heat Orchestration Service


# mv /etc/heat/heat.conf /etc/heat/heat.conf.org
# vi /etc/heat/heat.conf

[DEFAULT]
deferred_auth_method = trusts
trusts_delegated_roles = heat_stack_owner
# Heat installed server
heat_metadata_server_url = http://controller:8000
heat_waitcondition_server_url = http://controller:8000/v1/waitcondition
heat_watch_server_url = http://controller:8003
heat_stack_user_role = heat_stack_user
# Heat domain name
stack_user_domain_name = heat
# Heat domain admin name
stack_domain_admin = heat_domain_admin
# Heat domain admin's password
stack_domain_admin_password = servicepassword
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller

# MariaDB connection info


[database]
connection = mysql+pymysql://heat:password@controller/heat

# Keystone auth info


[clients_keystone]
auth_uri = http://controller:5000
# Keystone auth info
[ec2authtoken]
auth_uri = http://controller:5000

[heat_api]
bind_host = 0.0.0.0
bind_port = 8004

[heat_api_cfn]
bind_host = 0.0.0.0
bind_port = 8000

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = heat
password = servicepassword

[trustee]
auth_plugin = password
auth_url = http://controller:5000
username = heat
password = servicepassword
user_domain_name = default

# chgrp heat /etc/heat/heat.conf


# chmod 640 /etc/heat/heat.conf
# su -s /bin/bash heat -c "heat-manage db_sync"
# systemctl start openstack-heat-api openstack-heat-api-cfn openstack-heat-engine
# systemctl enable openstack-heat-api openstack-heat-api-cfn openstack-heat-engine
Configure NOVA Node

- LibVirt
- NovaCompute
- OpenVswitch
- L2 Agent
- CeiloMeter Compute

yum -y install qemu-kvm libvirt virt-install bridge-utils


yum --enablerepo=centos-openstack-queens,epel -y install openstack-nova-compute
yum --enablerepo=centos-openstack-queens,epel -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
yum --enablerepo=centos-openstack-queens,epel -y install openstack-ceilometer-compute

[root@compute01 ~]# lsmod | grep kvm


kvm_intel 183621 0
kvm 586948 1 kvm_intel
irqbypass 13503 1 kvm

[root@compute01 ~]# systemctl start libvirtd


[root@compute01 ~]# systemctl enable libvirtd

vi /etc/sysctl.conf
net.ipv4.ip_forward = 1
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.all.rp_filter=0

# sysctl -p

# virsh net-list
# virsh net-destroy default
# virsh net-autostart --network default --disable

# mv /etc/nova/nova.conf /etc/nova/nova.conf.org
# vi /etc/nova/nova.conf
[DEFAULT]
# define own IP address
my_ip = 192.168.202.24
state_path = /var/lib/nova
enabled_apis = osapi_compute,metadata
log_dir = /var/log/nova
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller

[api]
auth_strategy = keystone

# enable VNC
[vnc]
enabled = True
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html

# Glance connection info


[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = $state_path/tmp

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = servicepassword
[placement]
auth_url = http://controller:5000
os_region_name = RegionOne
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = placement
password = servicepassword

[wsgi]
api_paste_config = /etc/nova/api-paste.ini

# chmod 640 /etc/nova/nova.conf


# chgrp nova /etc/nova/nova.conf
# systemctl start openstack-nova-compute
# systemctl enable openstack-nova-compute

[root@controller ~(keystone)]#su -s /bin/bash nova -c "nova-manage cell_v2 discover_hosts"


/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s)
['use_tpool'] not supported
exception.NotSupportedWarning
[root@controller ~(keystone)]#openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+------------+----------+---------+-------+----------------------------+
| 4 | nova-consoleauth | controller | internal | enabled | up | 2019-01-24T06:41:44.000000 |
| 5 | nova-conductor | controller | internal | enabled | up | 2019-01-24T06:41:39.000000 |
| 7 | nova-scheduler | controller | internal | enabled | up | 2019-01-24T06:41:44.000000 |
| 8 | nova-compute | compute01 | nova | enabled | up | 2019-01-24T06:41:43.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+

Configure Neutron service on Compute Node


[root@compute01 ~]# mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.org
[root@compute01 ~]# vi /etc/neutron/neutron.conf

[DEFAULT]
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
state_path = /var/lib/neutron
allow_overlapping_ips = True
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = servicepassword

[oslo_concurrency]
lock_path = $state_path/lock

[root@compute01 ~]# chmod 640 /etc/neutron/neutron.conf


[root@compute01 ~]# chgrp neutron /etc/neutron/neutron.conf
[root@compute01 ~]# vi /etc/neutron/plugins/ml2/ml2_conf.ini

# line 129: add ( it's OK with no value for "tenant_network_types" (set later if need) )
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types =
mechanism_drivers = openvswitch,l2population,linuxbridge
extension_drivers = port_security

[root@compute01 ~]# vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

# line 308: add


[securitygroup]
firewall_driver = openvswitch
enable_security_group = true
enable_ipset = true

[root@compute01 ~]# vi /etc/nova/nova.conf


# add follows into [DEFAULT] section

use_neutron = True
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
vif_plugging_is_fatal = True
vif_plugging_timeout = 300

# add follows to the end: Neutron auth info


# the value of metadata_proxy_shared_secret is the same with the one in metadata_agent.ini
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = servicepassword
service_metadata_proxy = True
metadata_proxy_shared_secret = metadata_secret

[root@compute01 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini


[root@compute01 ~]# systemctl start openvswitch
[root@compute01 ~]# systemctl enable openvswitch
[root@compute01 ~]# ovs-vsctl add-br br-int
[root@compute01 ~]# systemctl restart openstack-nova-compute
[root@compute01 ~]# systemctl start neutron-openvswitch-agent
[root@compute01 ~]# systemctl enable neutron-openvswitch-agent

Configure Bridge ethernet on Compute 01

[root@compute01 ~]# cp /etc/sysconfig/network-scripts/ifcfg-eth0 /root/ifcfg-eth0.backup


[root@compute01 ~]# cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-br-eth0
Modify ifcfg-eth0 :
TYPE=Ethernet
BOOTPROTO=none
NAME=eth0
DEVICE=eth0
ONBOOT=yes

Modify ifcfg-br-eth0 :
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
NAME=br-eth0
DEVICE=br-eth0
ONBOOT=yes
IPADDR=192.168.202.8
PREFIX=24
GATEWAY=192.168.202.1
DNS1=192.168.0.21

[root@compute01 network-scripts]# ovs-vsctl add-br br-eth0


[root@compute01 network-scripts]# ovs-vsctl add-port br-eth0 eth0; systemctl restart network
[root@compute01 network-scripts]# ovs-vsctl show
[root@compute01 ~]# vi /etc/neutron/plugins/ml2/ml2_conf.ini

# line 181: add


[ml2_type_flat]
flat_networks = physnet1
[root@compute01 ~]# vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

# line 194: add


[ovs]
bridge_mappings = physnet1:br-eth0
[root@compute01 ~]# systemctl restart neutron-openvswitch-agent

Configure Neutron (Network) Node


yum install chrony -y --enablerepo=centos-openstack-queens,epel
yum --enablerepo=centos-openstack-queens,epel -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch

Edit sysctl :
# vi /etc/sysctl.conf
net.ipv4.ip_forward = 1
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.all.rp_filter=0

# sysctl -p
# mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.org
# vi /etc/neutron/neutron.conf

[DEFAULT]
core_plugin = ml2
service_plugins = router
auth_strategy = keystone
state_path = /var/lib/neutron
allow_overlapping_ips = True
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller
# Keystone auth info
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = servicepassword
[oslo_concurrency]
lock_path = $state_path/lock

[root@network ~]# chmod 640 /etc/neutron/neutron.conf


[root@network ~]# chgrp neutron /etc/neutron/neutron.conf
[root@network ~]# vi /etc/neutron/l3_agent.ini
# line 17: add
interface_driver = openvswitch

[root@network ~]# vi /etc/neutron/dhcp_agent.ini


# line 17: add
interface_driver = openvswitch
# line 28: uncomment
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# line 37: uncomment and change
enable_isolated_metadata = true

[root@network ~]# vi /etc/neutron/metadata_agent.ini


# line 22: uncomment and specify Nova API server
nova_metadata_host = controller
# line 34: uncomment and specify any secret key you like
metadata_proxy_shared_secret = metadata_secret
# line 260: uncomment and specify Memcache server
memcache_servers = controller:11211

[root@network ~]# vi /etc/neutron/plugins/ml2/ml2_conf.ini


# line 129: add ( it's OK with no value for "tenant_network_types" (set later if needed) )
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types =
mechanism_drivers = openvswitch,l2population,linuxbridge
extension_drivers = port_security
[root@network ~]# vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

# line 304: add


[securitygroup]
firewall_driver = openvswitch
enable_security_group = true
enable_ipset = true

[root@network ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini


[root@network ~]# systemctl start openvswitch
[root@network ~]# systemctl enable openvswitch
[root@network ~]# ovs-vsctl add-br br-int
[root@network ~]# for service in dhcp-agent l3-agent metadata-agent openvswitch-agent; do
systemctl start neutron-$service
systemctl enable neutron-$service
done

Configure Bridge ethernet on NetworkNode

[root@networknode ~]# cp /etc/sysconfig/network-scripts/ifcfg-eth0 /root/ifcfg-eth0.backup


[root@networknode ~]# cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-br-eth0

Modify ifcfg-eth0 :

TYPE=Ethernet
BOOTPROTO=none
NAME=eth0
DEVICE=eth0
ONBOOT=yes

Modify ifcfg-br-eth0 :

TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
NAME=br-eth0
DEVICE=br-eth0
ONBOOT=yes
IPADDR=192.168.202.7
PREFIX=24
GATEWAY=192.168.202.1
DNS1=192.168.0.21

[root@networknode network-scripts]# ovs-vsctl add-br br-eth0


[root@networknode network-scripts]# ovs-vsctl add-port br-eth0 eth0; systemctl restart network
[root@networknode network-scripts]# ovs-vsctl show
[root@networknode ~]# vi /etc/neutron/plugins/ml2/ml2_conf.ini

# line 181: add


[ml2_type_flat]
flat_networks = physnet1

[root@networknode ~]# vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

# line 194: add


[ovs]
bridge_mappings = physnet1:br-eth0
[root@networknode ~]# systemctl restart neutron-openvswitch-agent

Configure Neutron Networking.


# openstack network create --share --external \
--provider-physical-network physnet1 \
--provider-network-type flat External

# openstack subnet create --network External \


--allocation-pool start=192.168.202.40,end=192.168.202.46 \
--dns-nameserver 192.168.0.21 --gateway 192.168.202.1 \
--subnet-range 192.168.202.0/24 External

# openstack network list


# openstack subnet list
Configure CINDER & SWIFT (Storage) Node

Configure cinder on controller node :


[root~(keystone)]# mv /etc/cinder/cinder.conf /etc/cinder/cinder.conf.org
[root~(keystone)]# vi /etc/cinder/cinder.conf

[DEFAULT]
# define own IP address
my_ip = 192.168.202.6
log_dir = /var/log/cinder
state_path = /var/lib/cinder
auth_strategy = keystone
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller

# MariaDB connection info


[database]
connection = mysql+pymysql://cinder:password@controller/cinder

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = servicepassword

[oslo_concurrency]
lock_path = $state_path/tmp

[root~(keystone)]# chmod 640 /etc/cinder/cinder.conf


[root~(keystone)]# chgrp cinder /etc/cinder/cinder.conf
[root~(keystone)]# su -s /bin/bash cinder -c "cinder-manage db sync"
[root~(keystone)]# systemctl start openstack-cinder-api openstack-cinder-scheduler
[root~(keystone)]# systemctl enable openstack-cinder-api openstack-cinder-scheduler

# show status
[root~(keystone)]# openstack volume service list

Install Cinder, swift and backup service


yum --enablerepo=centos-openstack-queens,epel -y install nfs-utils openstack-cinder python2-crypto targetcli
yum --enablerepo=centos-openstack-queens,epel -y install openstack-swift-proxy python-memcached openssh-clients
yum --enablerepo=centos-openstack-queens,epel -y install openstack-swift-account openstack-swift-container openstack-swift-object xfsprogs rsync openssh-clients

Configure Cinder Volume.


[root@storage ~]# mv /etc/cinder/cinder.conf /etc/cinder/cinder.conf.org
[root@storage ~]# vi /etc/cinder/cinder.conf

[DEFAULT]
# define own IP address
my_ip = 192.168.202.9
log_dir = /var/log/cinder
state_path = /var/lib/cinder
auth_strategy = keystone
# RabbitMQ connection info
transport_url = rabbit://openstack:password@controller
# Glance connection info
glance_api_servers = http://controller:9292

# MariaDB connection info


[database]
connection = mysql+pymysql://cinder:password@controller/cinder

# Keystone auth info


[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = servicepassword

[oslo_concurrency]
lock_path = $state_path/tmp

[root@storage ~]# chmod 640 /etc/cinder/cinder.conf


[root@storage ~]# chgrp cinder /etc/cinder/cinder.conf
[root@storage ~]# systemctl start openstack-cinder-volume
[root@storage ~]# systemctl enable openstack-cinder-volume

Adding disk capacity on storage node :


Scan disk :
# for host in `ls /sys/class/scsi_host/`;do
echo "- - -" >/sys/class/scsi_host/${host}/scan;
done
# fdisk -l

Create volume group


[root@storagenode ~]# pvcreate /dev/vdb
Physical volume "/dev/vdb" successfully created.
[root@storagenode ~]# vgcreate -s 32M vg_cinder /dev/vdb
Volume group "vg_cinder" successfully created
Configure Cinder Volume on Storage Node.

[root@storage ~]# vi /etc/cinder/cinder.conf

# add follows into [DEFAULT] section


enabled_backends = lvm

# add follows to the end


[lvm]
iscsi_helper = lioadm
# volume group name just created
volume_group = vg_cinder
# IP address of Storage Node
iscsi_ip_address = 192.168.202.9
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volumes_dir = $state_path/volumes
iscsi_protocol = iscsi

[root@storage ~]# systemctl restart openstack-cinder-volume

Configure Nova on Compute Node.


[root@compute01 ~]# vi /etc/nova/nova.conf
# add to the end
[cinder]
os_region_name = RegionOne
[root@compute01 ~]# systemctl restart openstack-nova-compute

# set environment variable first


[root@dlp ~(keystone)]# echo "export OS_VOLUME_API_VERSION=2" >> ~/keystonerc
[root@dlp ~(keystone)]# source ~/keystonerc
[root@dlp ~(keystone)]# openstack volume type create lvm
+-------------+--------------------------------------+
| Field | Value |
+-------------+--------------------------------------+
| description | None |
| id | 85d8eb3b-ec2c-4138-87fa-64a370d51284 |
| is_public | True |
| name | lvm |
+-------------+--------------------------------------+
[root@dlp ~(keystone)]# openstack volume type create nfs
+-------------+--------------------------------------+
| Field | Value |
+-------------+--------------------------------------+
| description | None |
| id | 24d6ec24-7b94-438d-8ae5-14f814aef3b8 |
| is_public | True |
| name | nfs |
+-------------+--------------------------------------+
[root@dlp ~(keystone)]# openstack volume type list
+--------------------------------------+------+-----------+
| ID | Name | Is Public |
+--------------------------------------+------+-----------+
| 24d6ec24-7b94-438d-8ae5-14f814aef3b8 | nfs | True |
| 85d8eb3b-ec2c-4138-87fa-64a370d51284 | lvm | True |
+--------------------------------------+------+-----------+

INSTALLATION ICO SERVER


Reference : https://www.ibm.com/support/knowledgecenter/SS4KMC_2.5.0.8/com.ibm.ico.doc_2.5/c_preconfig_Mitaka.html
1. Add roles, users, and projects to Keystone. Run this step only once on the controller node
a. Create the following IBM Cloud Orchestrator roles in Keystone:
- netadmin
- sysadmin
- domain_admin
- catalogeditor
- member

~ controller # openstack role create netadmin


Repeat the command for the other IBM Cloud Orchestrator roles.
b. Assign a default admin project to the admin user by running the following command:
~ controller # openstack user set --project admin admin
c. Verify whether the member role on the admin project is granted for the user admin. If it is not granted, run
the following command:
~ controller # openstack role add --project admin --user admin member

You might also like