# yum install qemu-kvm qemu-img virt-manager libvirt libvirt-python python-virtinst libvirt-client bridge-utils # yum groupinstall Virtualization "Virtualization Client" "Virtualization Platform" "Virtualization Tools" # yum install openstack-utils memcached qpid-cpp-server openstack-nova dnsmasq-utils python-keystone-auth-token
virbr0
. Il comando ifconfig
dovrebbe mostrarla nel suo output: virbr0 Link encap:Ethernet HWaddr 52:54:00:54:65:A1 inet addr:192.168.122.1 Bcast:192.168.122.255 Mask:255.255.255.0 UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 RX packets:0 errors:0 dropped:0 overruns:0 frame:0 TX packets:45 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:0 RX bytes:0 (0.0 b) TX bytes:7585 (7.4 KiB)
/etc/qpidd.conf
sia impostato auth=no
.
force_dhcp_release
: # openstack-config --set /etc/nova/nova.conf DEFAULT force_dhcp_release False — Nota bene: non viene mostrato nessun output, ma viene modificato il file di configurazione.
# openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_inject_partition -1 — Nota bene: non viene mostrato nessun output, ma viene modificato il file di configurazione.
/etc/libvirt/libvirtd.conf
e assicurarsi siano impostati i seguenti valori:
# OPENSTACK LIVE MIGRATION listen_tls = 0 listen_tcp = 1 auth_tcp = "none"
/etc/sysconfig/libvirtd
e assicurarsi che ci siano le impostazioni come seguono:
LIBVIRTD_ARGS="--listen"
libvirtd
:
/etc/nova
nel seguente modo. nova.conf
: [DEFAULT] # LOG/STATE logdir = /var/log/nova verbose = True state_path = /var/lib/nova lock_path = /var/lib/nova/tmp # AUTHENTICATION auth_strategy = keystone # SCHEDULER #compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler # VOLUMES volume_group = <VOLUME_NAME> #volume_name_template = volume-%08x iscsi_helper = tgtadm # DATABASE del Cloud Controller sql_connection = mysql://nova:<YOUR_NOVADB_PASSWORD>@openstack-01.cnaf.infn.it/nova # COMPUTE libvirt_type = kvm connection_type = libvirt #instance_name_template = instance-%08x #api_paste_config=/etc/nova/api-paste.ini #allow_resize_to_same_host=True # APIS #osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions #ec2_dmz_host=192.168.206.130 #s3_host=192.168.206.130 # GLANCE image_service=nova.image.glance.GlanceImageService glance_api_servers=192.168.206.130:9292 # NETWORK network_manager = nova.network.manager.FlatDHCPManager force_dhcp_release = True dhcpbridge_flagfile = /etc/nova/nova.conf firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver # Change my_ip to match each host my_ip = <THIS_SERVER_IP> public_interface = eth0 #vlan_interface = eth0 flat_network_bridge = virbr0 flat_interface = eth0 fixed_range = 192.168.122.0/24 # NOVNC CONSOLE vnc_enabled = true vncserver_listen = 0.0.0.0 vncserver_proxyclient_address = 131.154.100.111 novncproxy_base_url=http://openstack-01.cnaf.infn.it:6080/vnc_auto.html xvpvncproxy_base_url=http://openstack-01.cnaf.infn.it:6081/console # Qpid qpid_hostname = openstack-01.cnaf.infn.it rpc_backend = nova.rpc.impl_qpid # OTHER dhcpbridge = /usr/bin/nova-dhcpbridge injected_network_template = /usr/share/nova/interfaces.template libvirt_xml_template = /usr/share/nova/libvirt.xml.template libvirt_nonblocking = True libvirt_inject_partition = -1 vpn_client_template = /usr/share/nova/client.ovpn.template credentials_template = /usr/share/nova/novarc.template root_helper = sudo nova-rootwrap remove_unused_base_images = True
sql_connection
e qpid_hostname
"openstack-01.cnaf.infn.it" è il server che ospita il Cloud Controller
flat_network_bridge
"virbr0" è l'interfaccia di rete virtuale del server che si sta configurando
/etc/nova/api-paste.ini
(la parte precedente rimane invariata): [...] [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory service_protocol = http service_host = <KEYSTONE_SERVICE_IP> service_port = 5000 auth_host = <KEYSTONE_SERVICE_IP> auth_port = 35357 auth_protocol = http auth_uri = http://<KEYSTONE_SERVICE_IP>:5000/ admin_tenant_name = service admin_user = nova admin_password = <NOVA_PASSWORD> — Dove:
nova-manage
potrebbe dare in output alcuni messaggi di Warning su metodi deprecati.
# for svc in api objectstore compute network volume scheduler cert; do echo openstack-nova-$svc; service openstack-nova-$svc stop ; chkconfig openstack-nova-$svc on; done # nova-manage db sync # for svc in api objectstore compute network volume scheduler cert; do echo openstack-nova-$svc; /etc/init.d/openstack-nova-$svc start ; done
# nova-manage service list Binary Host Zone Status State Updated_At nova-cert openstack-01.cnaf.infn.it nova enabled :-) 2012-10-15 14:46:04 nova-consoleauth openstack-01.cnaf.infn.it nova enabled :-) 2012-10-15 14:45:54 nova-console openstack-01.cnaf.infn.it nova enabled :-) 2012-10-15 14:46:04 nova-scheduler openstack-01.cnaf.infn.it nova enabled :-) 2012-10-15 14:45:55 nova-compute openstack-01.cnaf.infn.it nova enabled :-) 2012-10-15 14:45:54 nova-volume openstack-01.cnaf.infn.it nova enabled :-) 2012-10-15 14:45:55 nova-network openstack-01.cnaf.infn.it nova enabled :-) 2012-10-15 14:46:01 nova-compute openstack-02.cnaf.infn.it nova enabled :-) 2012-10-15 14:45:57 nova-network openstack-02.cnaf.infn.it nova enabled :-) 2012-10-15 14:46:03 nova-compute openstack-03.cnaf.infn.it nova enabled :-) 2012-10-15 14:46:02 nova-network openstack-03.cnaf.infn.it nova enabled :-) 2012-10-15 14:46:02
virbr0
). Il seguente comando, ad esempio, crea una sottorete con range 192.168.122.0/24. # nova-manage network create private --multi_host=T --fixed_range_v4=192.168.122.0/24 --bridge_interface=virbr0 --num_networks=1 --network_size=256 # nova-manage network list id IPv4 IPv6 start address DNS1 DNS2 VlanID project uuid 1 192.168.122.0/24 None 192.168.122.2 8.8.4.4 None None None 052f9b4b-e6d7-4ad9-a3f1-929e80008372
# nova secgroup-list +---------+-------------+ | Name | Description | +---------+-------------+ | default | default | +---------+-------------+
# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 +-------------+-----------+---------+-----------+--------------+ | IP Protocol | From Port | To Port | IP Range | Source Group | +-------------+-----------+---------+-----------+--------------+ | tcp | 22 | 22 | 0.0.0.0/0 | | +-------------+-----------+---------+-----------+--------------+
# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 +-------------+-----------+---------+-----------+--------------+ | IP Protocol | From Port | To Port | IP Range | Source Group | +-------------+-----------+---------+-----------+--------------+ | icmp | -1 | -1 | 0.0.0.0/0 | | +-------------+-----------+---------+-----------+--------------+
iscsi-initiator-utils
su ogni Compute Node. Assicurarsi quindi che sia installato, eventualmente installandolo attraverso il seguente comando: # yum install iscsi-initiator-utils
service iscsid force-start
/etc/init.d/iscsid
per poter far partire automaticamente iscsid all'avvio del server in modalità force-start
. Modificare i case start e restart dello switch all'interno dello script in modo tale che risulti come segue: [...] start) # rh_status_q && exit 0 # $1 force_start ;; [...] restart) # $1 stop force_start ;; [...]
# chkconfig iscsid on
|