diff --git a/Puppetfile b/Puppetfile index 1b6a7987b..7c4d747c6 100644 --- a/Puppetfile +++ b/Puppetfile @@ -7,11 +7,11 @@ mod 'aviator', :git => 'https://github.com/aimonb/puppet_aviator.git' mod 'ceilometer', - :commit => 'f17b3442402529281bdca9b2f70697e04bcfe216', + :commit => '05926aaaba5f733f7be1f3e57b59db3ee1385db3', :git => 'https://github.com/openstack/puppet-ceilometer.git' mod 'ceph', - :commit => 'b6ab15b47c81d7694fdcfd75d8f2e0a0481ca40c', + :commit => '71fccdf944a2b0721a972a8a86f97703cabd586e', :git => 'https://github.com/stackforge/puppet-ceph.git' mod 'certmonger', @@ -19,7 +19,7 @@ mod 'certmonger', :git => 'https://github.com/rcritten/puppet-certmonger.git' mod 'cinder', - :commit => '15fd1a81b047342e71bfe939cf059eb4fd143e4d', + :commit => '330dda3c4106ffef318d330f4de303b631196581', :git => 'https://github.com/openstack/puppet-cinder.git' mod 'common', @@ -43,7 +43,7 @@ mod 'galera', :git => 'https://github.com/redhat-openstack/puppet-galera.git' mod 'glance', - :commit => '0bb7577f7e4189495a148ca91f4f92c118e499e7', + :commit => 'c2d8e72f594b0caab0ace8c17d59717a3dd13385', :git => 'https://github.com/openstack/puppet-glance.git' mod 'gluster', @@ -51,7 +51,7 @@ mod 'gluster', :git => 'https://github.com/purpleidea/puppet-gluster.git' mod 'gnocchi', - :commit => '388ace2495661ff909b45c00471a121b5f2d18e3', + :commit => '02692807d218210a27a4043e27b3aa0ac38e8b28', :git => 'https://github.com/openstack/puppet-gnocchi.git' mod 'haproxy', @@ -59,11 +59,11 @@ mod 'haproxy', :git => 'https://github.com/puppetlabs/puppetlabs-haproxy.git' mod 'heat', - :commit => '8e742b8d09e3bded06e9529ccd5e7f38e527ab5d', + :commit => '3ced80f2eb6fc8a0694e8586c212d1a3ec5a3bdc', :git => 'https://github.com/openstack/puppet-heat.git' mod 'horizon', - :commit => '6e11fbf346560dab461964e53bcb1c817707abc5', + :commit => '79e373bebc010c25410a684437ae41b60e8eefc8', :git => 'https://github.com/openstack/puppet-horizon.git' mod 'inifile', @@ -75,7 +75,7 @@ mod 'ipa', :git => 'https://github.com/xbezdick/puppet-ipa-1.git' mod 'ironic', - :commit => '886a9c0aa2c8ac6167e2d95a60c55bbd6977f82b', + :commit => '88c228cca9d74072a5e944acf0063418b9bdfe4e', :git => 'https://github.com/openstack/puppet-ironic.git' mod 'keepalived', @@ -83,11 +83,15 @@ mod 'keepalived', :git => 'https://github.com/Unyonsys/puppet-module-keepalived.git' mod 'keystone', - :commit => 'e0e87604b8199c0ac0d443999153f88fb7797a9c', + :commit => '6dbef6298478b5a8799c9dadd8d0b4b7edbcdd74', :git => 'https://github.com/openstack/puppet-keystone.git' +mod 'kmod', + :commit => 'ea03df0eff7b7e5faccb9c4e386d451301468f04', + :git => 'https://github.com/camptocamp/puppet-kmod.git' + mod 'manila', - :commit => 'ff018bdef812bacfe512663c1cc0b92c7d1f26b2', + :commit => 'b94391158dc7c392986db9b0eb63562dde411ea1', :git => 'https://github.com/openstack/puppet-manila.git' mod 'memcached', @@ -111,7 +115,7 @@ mod 'mysql', :git => 'https://github.com/puppetlabs/puppetlabs-mysql.git' mod 'n1k_vsm', - :commit => 'ed391d3d9bdcccef3e2de3b0da98e654237d99a5', + :commit => '8266fb6dd504bc6ab310644728b076a6de996cbd', :git => 'https://github.com/stackforge/puppet-n1k-vsm.git' mod 'nagios', @@ -119,11 +123,11 @@ mod 'nagios', :git => 'https://github.com/gildub/puppet-nagios-openstack.git' mod 'neutron', - :commit => '36b463c003f94fe23c3d9eff207bbf5005ee470c', + :commit => 'c683ca5fc31111b7b0569cc71e9b959e62d21c4b', :git => 'https://github.com/openstack/puppet-neutron.git' mod 'nova', - :commit => 'cb2d487fb9baf2c1902182973e29982b5389e769', + :commit => 
'ea1eddfaa04a3c44d9b6863bb4a87c89b91f3305', :git => 'https://github.com/openstack/puppet-nova.git' mod 'nssdb', @@ -135,15 +139,15 @@ mod 'ntp', :git => 'https://github.com/puppetlabs/puppetlabs-ntp' mod 'openstack_extras', - :commit => 'ab8ddaff93e06d7fdbfb2c61ffe9113562dd82ad', + :commit => '2d08ba975b65c613fbdc08812cbf30e7d6c9d6e4', :git => 'https://github.com/openstack/puppet-openstack_extras.git' mod 'openstacklib', - :commit => '78ef674e0e6bc73b3a0af7833025dc9786eaeaeb', + :commit => 'fcfffcf47069119805e44cc712739ba0c1e4603d', :git => 'https://github.com/openstack/puppet-openstacklib.git' mod 'pacemaker', - :commit => '1fa8e5591ab606a7be3bae32df088f6cec26d3c0', + :commit => 'e9a8f0d048c1bbc86a4bb1ee81417da3afe6673e', :git => 'https://github.com/redhat-openstack/puppet-pacemaker.git' mod 'puppet', @@ -155,7 +159,7 @@ mod 'qpid', :git => 'https://github.com/dprince/puppet-qpid' mod 'rabbitmq', - :commit => '1cdf6568473a0a45fb0b06b21b0c9a82c398bf08', + :commit => 'aa3ec5e061f619f4340a9c6ef1465f2f6a673cb4', :git => 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git' mod 'redis', @@ -171,7 +175,7 @@ mod 'rsync', :git => 'https://github.com/puppetlabs/puppetlabs-rsync.git' mod 'sahara', - :commit => '56d9a841bff7389d1128d40dad4a319988d547f1', + :commit => '6e79d710a97d7c43aea766bbfa8c4d782a7c2049', :git => 'https://github.com/openstack/puppet-sahara.git' mod 'snmp', @@ -191,7 +195,7 @@ mod 'stdlib', :git => 'https://github.com/puppetlabs/puppetlabs-stdlib.git' mod 'swift', - :commit => '275fb9b111bbb30148e762afc0798aa891091cf2', + :commit => 'd736e6a00e8ff1e4f05c8fbc9535908fc2112587', :git => 'https://github.com/openstack/puppet-swift.git' mod 'sysctl', @@ -199,7 +203,7 @@ mod 'sysctl', :git => 'https://github.com/puppetlabs/puppetlabs-sysctl.git' mod 'tempest', - :commit => 'ab9601ff339f6acb40901291133c03cf1b45d5ee', + :commit => '4cc2ad2e827e1117d79dbab1bc91283428ffdc78', :git => 'https://github.com/openstack/puppet-tempest.git' mod 'timezone', @@ -207,15 +211,15 @@ mod 'timezone', :git => 'https://github.com/saz/puppet-timezone.git' mod 'tripleo', - :commit => '717eb39ca7295e128ab99694ea3a7ae658b9528f', + :commit => 'a003b3d97ebe8bd470717c419434851d60669bf7', :git => 'https://github.com/openstack/puppet-tripleo.git' mod 'trove', - :commit => '2636a384f67c2ac1513ac709f3ae4525668ccb62', + :commit => '1c5d6895c86fdb99501c72ef999864268bb7a389', :git => 'https://github.com/openstack/puppet-trove' mod 'tuskar', - :commit => '85a3991330f36903cd66a6e12512b54ab758ec41', + :commit => '09e2ced9be2cdf5d94dd6310e9df3a3155ab549b', :git => 'https://github.com/openstack/puppet-tuskar.git' mod 'vcsrepo', @@ -227,7 +231,7 @@ mod 'vlan', :git => 'https://github.com/derekhiggins/puppet-vlan.git' mod 'vswitch', - :commit => 'e73073a120d833958d22925a1badbc1ccdc0f15a', + :commit => '5992d41a238c93bcf6bd6989c23d3c7577c48b69', :git => 'https://github.com/openstack/puppet-vswitch.git' mod 'xinetd', diff --git a/ceilometer/CHANGELOG.md b/ceilometer/CHANGELOG.md index d5913be17..e29904bed 100644 --- a/ceilometer/CHANGELOG.md +++ b/ceilometer/CHANGELOG.md @@ -1,3 +1,32 @@ +##2015-07-08 - 6.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Kilo. 
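To make the incompatibility listed below concrete, here is a rough sketch of where the module now writes the RabbitMQ options; the resource titles mirror the ceilometer_config entries in manifests/init.pp further down in this patch, while the values are placeholders rather than defaults:

  # Illustrative only: rabbit options now land in [oslo_messaging_rabbit]
  ceilometer_config {
    'oslo_messaging_rabbit/rabbit_userid':       value => 'ceilometer';            # placeholder
    'oslo_messaging_rabbit/rabbit_password':     value => 'an_even_bigger_secret', secret => true;
    'oslo_messaging_rabbit/rabbit_virtual_host': value => '/';
    'oslo_messaging_rabbit/rabbit_use_ssl':      value => false;
  }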
+ +####Backwards-incompatible changes +- Move rabbit/kombu settings to oslo_messaging_rabbit section + +####Features +- Puppet 4.x support +- make crontab for expirer optional +- Refactorise Keystone resources management +- db: Added postgresql backend using openstacklib helper +- Implement Ceilometer-API as a WSGI process support +- Add support for ceilometer-polling agent +- Add support for identity_uri +- Tag all Ceilometer packages +- Add udp_address/udp_port parameters for collector. +- Deprecate old public, internal and admin parameters + +####Bugfixes +- Ensure python-mysqldb is installed before MySQL db_sync +- Fix dependency on nova-common package + +####Maintenance +- Acceptance tests with Beaker +- Fix spec tests for RSpec 3.x and Puppet 4.x + + ##2015-06-17 - 5.1.0 ###Summary diff --git a/ceilometer/README.md b/ceilometer/README.md index 7bd699a77..56155b2f9 100644 --- a/ceilometer/README.md +++ b/ceilometer/README.md @@ -1,7 +1,7 @@ Ceilometer ========== -5.1.0 - 2014.2 - Juno +6.0.0 - 2015.1 - Kilo #### Table of Contents diff --git a/ceilometer/manifests/agent/auth.pp b/ceilometer/manifests/agent/auth.pp index 2b6f536e2..47f04026a 100644 --- a/ceilometer/manifests/agent/auth.pp +++ b/ceilometer/manifests/agent/auth.pp @@ -29,14 +29,20 @@ # [*auth_cacert*] # Certificate chain for SSL validation. Optional; Defaults to 'None' # +# [*auth_endpoint_type*] +# Type of endpoint in Identity service catalog to use for +# communication with OpenStack services. +# Optional. Defaults to undef. +# class ceilometer::agent::auth ( $auth_password, - $auth_url = 'http://localhost:5000/v2.0', - $auth_region = 'RegionOne', - $auth_user = 'ceilometer', - $auth_tenant_name = 'services', - $auth_tenant_id = '', - $auth_cacert = undef, + $auth_url = 'http://localhost:5000/v2.0', + $auth_region = 'RegionOne', + $auth_user = 'ceilometer', + $auth_tenant_name = 'services', + $auth_tenant_id = '', + $auth_cacert = undef, + $auth_endpoint_type = undef, ) { if ! $auth_cacert { @@ -59,4 +65,10 @@ } } + if $auth_endpoint_type { + ceilometer_config { + 'service_credentials/os_endpoint_type' : value => $auth_endpoint_type; + } + } + } diff --git a/ceilometer/manifests/api.pp b/ceilometer/manifests/api.pp index 66d71d38b..9bcb67937 100644 --- a/ceilometer/manifests/api.pp +++ b/ceilometer/manifests/api.pp @@ -64,6 +64,15 @@ # (optional) ensure state for package. # Defaults to 'present' # +# [*service_name*] +# (optional) Name of the service that will be providing the +# server functionality of ceilometer-api. +# If the value is 'httpd', this means ceilometer-api will be a web +# service, and you must use another class to configure that +# web service. For example, use class { 'ceilometer::wsgi::apache'...} +# to make ceilometer-api be a web app using apache mod_wsgi.
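+#   A usage sketch (it mirrors the new beaker acceptance test added later in
+#   this patch; illustrative only, not part of the class documentation):
+#
+#     class { '::ceilometer::api':
+#       enabled           => true,
+#       keystone_password => 'a_big_secret',   # placeholder credential
+#       service_name      => 'httpd',
+#     }
+#     include ::apache
+#     class { '::ceilometer::wsgi::apache':
+#       ssl => false,
+#     }
+#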
+# Defaults to '$::ceilometer::params::api_service_name' +# class ceilometer::api ( $manage_service = true, $enabled = true, @@ -75,23 +84,24 @@ $keystone_identity_uri = false, $host = '0.0.0.0', $port = '8777', + $service_name = $::ceilometer::params::api_service_name, # DEPRECATED PARAMETERS $keystone_host = '127.0.0.1', $keystone_port = '35357', $keystone_auth_admin_prefix = false, $keystone_protocol = 'http', -) { +) inherits ceilometer::params { include ::ceilometer::params include ::ceilometer::policy validate_string($keystone_password) - Ceilometer_config<||> ~> Service['ceilometer-api'] - Class['ceilometer::policy'] ~> Service['ceilometer-api'] + Ceilometer_config<||> ~> Service[$service_name] + Class['ceilometer::policy'] ~> Service[$service_name] Package['ceilometer-api'] -> Ceilometer_config<||> - Package['ceilometer-api'] -> Service['ceilometer-api'] + Package['ceilometer-api'] -> Service[$service_name] Package['ceilometer-api'] -> Class['ceilometer::policy'] package { 'ceilometer-api': ensure => $package_ensure, @@ -106,16 +116,30 @@ $service_ensure = 'stopped' } } - - Package['ceilometer-common'] -> Service['ceilometer-api'] - service { 'ceilometer-api': - ensure => $service_ensure, - name => $::ceilometer::params::api_service_name, - enable => $enabled, - hasstatus => true, - hasrestart => true, - require => Class['ceilometer::db'], - subscribe => Exec['ceilometer-dbsync'] + Package['ceilometer-common'] -> Service[$service_name] + + if $service_name == $::ceilometer::params::api_service_name { + service { 'ceilometer-api': + ensure => $service_ensure, + name => $::ceilometer::params::api_service_name, + enable => $enabled, + hasstatus => true, + hasrestart => true, + require => Class['ceilometer::db'], + subscribe => Exec['ceilometer-dbsync'], + tag => 'ceilometer-service', + } + } elsif $service_name == 'httpd' { + include ::apache::params + service { 'ceilometer-api': + ensure => 'stopped', + name => $::ceilometer::params::api_service_name, + enable => false, + tag => 'ceilometer-service', + } + Class['ceilometer::db'] -> Service[$service_name] + } else { + fail('Invalid service_name. Either ceilometer-api/openstack-ceilometer-api for running as a standalone service, or httpd for being run by a httpd server') } ceilometer_config { diff --git a/ceilometer/manifests/init.pp b/ceilometer/manifests/init.pp index 577e3533f..3769f3ce9 100644 --- a/ceilometer/manifests/init.pp +++ b/ceilometer/manifests/init.pp @@ -41,6 +41,22 @@ # password to connect to the rabbit_server. Optional. Defaults to empty. # [*rabbit_virtual_host*] # virtualhost to use. Optional. Defaults to '/' +# +# [*rabbit_heartbeat_timeout_threshold*] +# (optional) Number of seconds after which the RabbitMQ broker is considered +# down if the heartbeat keepalive fails. Any value >0 enables heartbeats. +# Heartbeating helps to ensure the TCP connection to RabbitMQ isn't silently +# closed, resulting in missed or lost messages from the queue. +# (Requires kombu >= 3.0.7 and amqp >= 1.4.0) +# Defaults to 0 +# +# [*rabbit_heartbeat_rate*] +# (optional) How often during the rabbit_heartbeat_timeout_threshold period to +# check the heartbeat on RabbitMQ connection. (e.g. with rabbit_heartbeat_rate=2 +# when rabbit_heartbeat_timeout_threshold=60, the heartbeat will be checked +# every 30 seconds.)
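+#   A usage sketch for the two heartbeat settings documented here, reusing the
+#   illustrative values from the sentence above (not defaults from this patch):
+#
+#     class { '::ceilometer':
+#       metering_secret                    => 'secrete',                 # placeholder
+#       rabbit_password                    => 'an_even_bigger_secret',   # placeholder
+#       rabbit_heartbeat_timeout_threshold => 60,
+#       rabbit_heartbeat_rate              => 2,
+#     }
+#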
+# Defaults to 2 +# # [*rabbit_use_ssl*] # (optional) Connect over SSL for RabbitMQ # Defaults to false @@ -75,39 +91,41 @@ # (optional) various QPID options # class ceilometer( - $metering_secret = false, - $notification_topics = ['notifications'], - $package_ensure = 'present', - $debug = false, - $log_dir = '/var/log/ceilometer', - $verbose = false, - $use_syslog = false, - $log_facility = 'LOG_USER', - $rpc_backend = 'ceilometer.openstack.common.rpc.impl_kombu', - $rabbit_host = '127.0.0.1', - $rabbit_port = 5672, - $rabbit_hosts = undef, - $rabbit_userid = 'guest', - $rabbit_password = '', - $rabbit_virtual_host = '/', - $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_version = 'TLSv1', - $qpid_hostname = 'localhost', - $qpid_port = 5672, - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0 + $metering_secret = false, + $notification_topics = ['notifications'], + $package_ensure = 'present', + $debug = false, + $log_dir = '/var/log/ceilometer', + $verbose = false, + $use_syslog = false, + $log_facility = 'LOG_USER', + $rpc_backend = 'ceilometer.openstack.common.rpc.impl_kombu', + $rabbit_host = '127.0.0.1', + $rabbit_port = 5672, + $rabbit_hosts = undef, + $rabbit_userid = 'guest', + $rabbit_password = '', + $rabbit_virtual_host = '/', + $rabbit_heartbeat_timeout_threshold = 0, + $rabbit_heartbeat_rate = 2, + $rabbit_use_ssl = false, + $kombu_ssl_ca_certs = undef, + $kombu_ssl_certfile = undef, + $kombu_ssl_keyfile = undef, + $kombu_ssl_version = 'TLSv1', + $qpid_hostname = 'localhost', + $qpid_port = 5672, + $qpid_username = 'guest', + $qpid_password = 'guest', + $qpid_heartbeat = 60, + $qpid_protocol = 'tcp', + $qpid_tcp_nodelay = true, + $qpid_reconnect = true, + $qpid_reconnect_timeout = 0, + $qpid_reconnect_limit = 0, + $qpid_reconnect_interval_min = 0, + $qpid_reconnect_interval_max = 0, + $qpid_reconnect_interval = 0, ) { validate_string($metering_secret) @@ -187,10 +205,12 @@ } ceilometer_config { - 'oslo_messaging_rabbit/rabbit_userid' : value => $rabbit_userid; - 'oslo_messaging_rabbit/rabbit_password' : value => $rabbit_password, secret => true; - 'oslo_messaging_rabbit/rabbit_virtual_host' : value => $rabbit_virtual_host; - 'oslo_messaging_rabbit/rabbit_use_ssl' : value => $rabbit_use_ssl; + 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; + 'oslo_messaging_rabbit/rabbit_password': value => $rabbit_password, secret => true; + 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; + 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; + 'oslo_messaging_rabbit/heartbeat_timeout_threshold': value => $rabbit_heartbeat_timeout_threshold; + 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; } if $rabbit_use_ssl { diff --git a/ceilometer/manifests/params.pp b/ceilometer/manifests/params.pp index 524c2f902..a1923230b 100644 --- a/ceilometer/manifests/params.pp +++ b/ceilometer/manifests/params.pp @@ -68,7 +68,7 @@ } } $ceilometer_wsgi_script_path = '/usr/lib/cgi-bin/ceilometer' - $ceilometer_wsgi_script_source = '/usr/share/ceilometer/app.wsgi' + $ceilometer_wsgi_script_source = '/usr/lib/python2.7/dist-packages/ceilometer/api/app.wsgi' } default: { 
fail("Unsupported osfamily: ${::osfamily} operatingsystem: \ diff --git a/ceilometer/metadata.json b/ceilometer/metadata.json index 0b32dd6bb..c7d82fc99 100644 --- a/ceilometer/metadata.json +++ b/ceilometer/metadata.json @@ -1,6 +1,6 @@ { - "name": "stackforge-ceilometer", - "version": "5.1.0", + "name": "openstack-ceilometer", + "version": "6.0.0", "author": "eNovance and OpenStack Contributors", "summary": "Puppet module for OpenStack Ceilometer", "license": "Apache-2.0", @@ -33,8 +33,8 @@ "dependencies": [ { "name": "puppetlabs/apache", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0 <6.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } ] } diff --git a/ceilometer/spec/acceptance/ceilometer_wsgi_apache_spec.rb b/ceilometer/spec/acceptance/ceilometer_wsgi_apache_spec.rb new file mode 100644 index 000000000..d731de218 --- /dev/null +++ b/ceilometer/spec/acceptance/ceilometer_wsgi_apache_spec.rb @@ -0,0 +1,133 @@ +require 'spec_helper_acceptance' + +describe 'ceilometer with mysql' do + + context 'default parameters' do + + it 'should work with no errors' do + pp= <<-EOS + Exec { logoutput => 'on_failure' } + + # Common resources + case $::osfamily { + 'Debian': { + include ::apt + class { '::openstack_extras::repo::debian::ubuntu': + release => 'kilo', + package_require => true, + } + $package_provider = 'apt' + } + 'RedHat': { + class { '::openstack_extras::repo::redhat::redhat': + release => 'kilo', + } + package { 'openstack-selinux': ensure => 'latest' } + $package_provider = 'yum' + } + default: { + fail("Unsupported osfamily (${::osfamily})") + } + } + + class { '::mysql::server': } + + class { '::rabbitmq': + delete_guest_user => true, + package_provider => $package_provider, + } + + rabbitmq_vhost { '/': + provider => 'rabbitmqctl', + require => Class['rabbitmq'], + } + + rabbitmq_user { 'ceilometer': + admin => true, + password => 'an_even_bigger_secret', + provider => 'rabbitmqctl', + require => Class['rabbitmq'], + } + + rabbitmq_user_permissions { 'ceilometer@/': + configure_permission => '.*', + write_permission => '.*', + read_permission => '.*', + provider => 'rabbitmqctl', + require => Class['rabbitmq'], + } + + + # Keystone resources, needed by Ceilometer to run + class { '::keystone::db::mysql': + password => 'keystone', + } + class { '::keystone': + verbose => true, + debug => true, + database_connection => 'mysql://keystone:keystone@127.0.0.1/keystone', + admin_token => 'admin_token', + enabled => true, + } + class { '::keystone::roles::admin': + email => 'test@example.tld', + password => 'a_big_secret', + } + class { '::keystone::endpoint': + public_url => "https://${::fqdn}:5000/", + admin_url => "https://${::fqdn}:35357/", + } + + # Ceilometer resources + class { '::ceilometer': + metering_secret => 'secrete', + rabbit_userid => 'ceilometer', + rabbit_password => 'an_even_bigger_secret', + rabbit_host => '127.0.0.1', + } + # Until https://review.openstack.org/177593 is merged: + Package<| title == 'python-mysqldb' |> -> Class['ceilometer::db'] + class { '::ceilometer::db::mysql': + password => 'a_big_secret', + } + class { '::ceilometer::db': + database_connection => 
'mysql://ceilometer:a_big_secret@127.0.0.1/ceilometer?charset=utf8', + } + class { '::ceilometer::keystone::auth': + password => 'a_big_secret', + } + class { '::ceilometer::client': } + class { '::ceilometer::collector': } + class { '::ceilometer::expirer': } + class { '::ceilometer::alarm::evaluator': } + class { '::ceilometer::alarm::notifier': } + class { '::ceilometer::agent::central': } + class { '::ceilometer::agent::notification': } + class { '::ceilometer::api': + enabled => true, + keystone_password => 'a_big_secret', + keystone_identity_uri => 'http://127.0.0.1:35357/', + service_name => 'httpd', + } + include ::apache + class { '::ceilometer::wsgi::apache': + ssl => false, + } + EOS + + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + apply_manifest(pp, :catch_changes => true) + end + + describe port(8777) do + it { is_expected.to be_listening } + end + + describe cron do + it { is_expected.to have_entry('1 0 * * * ceilometer-expirer').with_user('ceilometer') } + end + + end +end diff --git a/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb b/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb index f72459315..fd1908cd7 100644 --- a/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb +++ b/ceilometer/spec/classes/ceilometer_agent_auth_spec.rb @@ -30,9 +30,13 @@ context 'when overriding parameters' do before do - params.merge!(:auth_cacert => '/tmp/dummy.pem') + params.merge!( + :auth_cacert => '/tmp/dummy.pem', + :auth_endpoint_type => 'internalURL', + ) end it { is_expected.to contain_ceilometer_config('service_credentials/os_cacert').with_value(params[:auth_cacert]) } + it { is_expected.to contain_ceilometer_config('service_credentials/os_endpoint_type').with_value(params[:auth_endpoint_type]) } end end diff --git a/ceilometer/spec/classes/ceilometer_api_spec.rb b/ceilometer/spec/classes/ceilometer_api_spec.rb index e54eea1a9..6ada5b8be 100644 --- a/ceilometer/spec/classes/ceilometer_api_spec.rb +++ b/ceilometer/spec/classes/ceilometer_api_spec.rb @@ -3,7 +3,8 @@ describe 'ceilometer::api' do let :pre_condition do - "class { 'ceilometer': metering_secret => 's3cr3t' }" + "class { 'ceilometer': metering_secret => 's3cr3t' } + include ::ceilometer::db" end let :params do @@ -113,11 +114,51 @@ ) end end + + context 'when running ceilometer-api in wsgi' do + before do + params.merge!({ :service_name => 'httpd' }) + end + + let :pre_condition do + "include ::apache + include ::ceilometer::db + class { 'ceilometer': metering_secret => 's3cr3t' }" + end + + it 'configures ceilometer-api service with Apache' do + is_expected.to contain_service('ceilometer-api').with( + :ensure => 'stopped', + :name => platform_params[:api_service_name], + :enable => false, + :tag => 'ceilometer-service', + ) + end + end + + context 'when service_name is not valid' do + before do + params.merge!({ :service_name => 'foobar' }) + end + + let :pre_condition do + "include ::apache + include ::ceilometer::db + class { 'ceilometer': metering_secret => 's3cr3t' }" + end + + it_raises 'a Puppet::Error', /Invalid service_name/ + end end context 'on Debian platforms' do let :facts do - { :osfamily => 'Debian' } + { :osfamily => 'Debian', + :operatingsystem => 'Debian', + :operatingsystemrelease => '8.0', + :concat_basedir => '/var/lib/puppet/concat', + :fqdn => 'some.host.tld', + :processorcount => 2 } end let :platform_params do @@ -130,7 +171,12 @@ context 'on RedHat platforms' do let :facts do - { :osfamily => 'RedHat' } + { :osfamily => 'RedHat', + 
:operatingsystem => 'RedHat', + :operatingsystemrelease => '7.1', + :fqdn => 'some.host.tld', + :concat_basedir => '/var/lib/puppet/concat', + :processorcount => 2 } end let :platform_params do diff --git a/ceilometer/spec/classes/ceilometer_init_spec.rb b/ceilometer/spec/classes/ceilometer_init_spec.rb index 536a30614..594fbb09e 100644 --- a/ceilometer/spec/classes/ceilometer_init_spec.rb +++ b/ceilometer/spec/classes/ceilometer_init_spec.rb @@ -39,6 +39,7 @@ it_configures 'a ceilometer base installation' it_configures 'rabbit with SSL support' it_configures 'rabbit without HA support (with backward compatibility)' + it_configures 'rabbit with connection heartbeats' end context 'with rabbit_hosts parameter' do @@ -178,6 +179,8 @@ is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ) is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ).with_secret(true) is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) + is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') + is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') end it { is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_host').with_value( params[:rabbit_host] ) } @@ -194,6 +197,8 @@ is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ) is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ).with_secret(true) is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) + is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') + is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') end it { is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_host').with_ensure('absent') } @@ -210,6 +215,8 @@ is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ) is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ).with_secret(true) is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) + is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') + is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') end it { is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/rabbit_host').with_ensure('absent') } @@ -219,6 +226,18 @@ end + shared_examples_for 'rabbit with connection heartbeats' do + context "with heartbeat configuration" do + before { params.merge!( + :rabbit_heartbeat_timeout_threshold => '60', + :rabbit_heartbeat_rate => '10' + ) } + + it { is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('60') } + it { is_expected.to contain_ceilometer_config('oslo_messaging_rabbit/heartbeat_rate').with_value('10') } + end + end + shared_examples_for 'rabbit with SSL support' do context "with default parameters" do it { is_expected.to 
contain_ceilometer_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('false') } diff --git a/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb b/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb index b7c689900..ad4c07654 100644 --- a/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb +++ b/ceilometer/spec/classes/ceilometer_wsgi_apache_spec.rb @@ -119,7 +119,7 @@ :httpd_service_name => 'apache2', :httpd_ports_file => '/etc/apache2/ports.conf', :wsgi_script_path => '/usr/lib/cgi-bin/ceilometer', - :wsgi_script_source => '/usr/share/ceilometer/app.wsgi' + :wsgi_script_source => '/usr/lib/python2.7/dist-packages/ceilometer/api/app.wsgi' } end diff --git a/ceilometer/spec/spec_helper_acceptance.rb b/ceilometer/spec/spec_helper_acceptance.rb index 429e807c4..6031cb8d4 100644 --- a/ceilometer/spec/spec_helper_acceptance.rb +++ b/ceilometer/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/ceph/.fixtures.yml b/ceph/.fixtures.yml index 9428e1cf0..0448cc0ae 100644 --- a/ceph/.fixtures.yml +++ b/ceph/.fixtures.yml @@ -7,10 +7,10 @@ fixtures: 'inifile': 'git://github.com/puppetlabs/puppetlabs-inifile' 'apache': repo: 'git://github.com/puppetlabs/puppetlabs-apache' - ref: '1.0.1' + ref: '1.4.1' 'concat': repo: 'git://github.com/puppetlabs/puppetlabs-concat' - ref: '1.1.0' + ref: '1.2.1' symlinks: 'ceph': "#{source_dir}" diff --git a/ceph/Gemfile b/ceph/Gemfile index 0511f9d06..e3e4a8158 100644 --- a/ceph/Gemfile +++ b/ceph/Gemfile @@ -2,8 +2,8 @@ source 'https://rubygems.org' group :development, :test do gem 'puppetlabs_spec_helper', :require => false - gem 'rspec-puppet', '~> 2.0.0', :require => false - gem 'beaker-rspec', '~> 2.2.4', :require => false + gem 'rspec-puppet', '~> 2.1.0', :require => false + gem 'beaker-rspec', :require => false gem 'puppet-lint-param-docs' gem 'metadata-json-lint' gem 'puppet-lint-absolute_classname-check' diff --git a/ceph/Gemfile-rspec-system b/ceph/Gemfile-rspec-system index cce2000a1..1927be5de 100644 --- a/ceph/Gemfile-rspec-system +++ b/ceph/Gemfile-rspec-system @@ -3,7 +3,7 @@ source 'https://rubygems.org' group :development, :test do gem 'puppetlabs_spec_helper', :require => false - gem 'rspec-puppet', '~> 2.0.0', :require => false + gem 'rspec-puppet', '~> 2.1.0', :require => false gem 'metadata-json-lint' gem 'puppet-lint-param-docs' gem 'puppet-lint-absolute_classname-check' diff --git a/ceph/Puppetfile b/ceph/Puppetfile index 65400b5f6..f84861ad4 100644 --- a/ceph/Puppetfile +++ b/ceph/Puppetfile @@ -17,8 +17,8 @@ mod 'bodepd/scenario_node_terminus', mod 'puppetlabs/apache', :git => 'git://github.com/puppetlabs/puppetlabs-apache', - :ref => '1.0.1' + :ref => '1.4.1' mod 'puppetlabs/concat', :git => 'git://github.com/puppetlabs/puppetlabs-concat', - :ref => '1.1.x' + :ref => '1.2.1' diff --git a/ceph/README.md b/ceph/README.md index f3ae2179c..bdd0b03b2 100644 --- a/ceph/README.md +++ b/ceph/README.md @@ -128,10 +128,12 @@ The RS_SET environment variable contains the resource set of linux distribution configurations for which integration tests are going to be run.
Available values are -* two-ubuntu-server-1204-x64 -* ubuntu-server-1204-x64 -* two-centos-66-x64 -* centos-66-x64 +* two-ubuntu-server-12042-x64 +* one-ubuntu-server-12042-x64 +* two-centos-65-x64 +* one-centos-65-x64 +* two-centos-70-x64 +* one-centos-70-x64 The default is diff --git a/ceph/manifests/init.pp b/ceph/manifests/init.pp index 170d53e76..ffd5a7d12 100644 --- a/ceph/manifests/init.pp +++ b/ceph/manifests/init.pp @@ -68,6 +68,9 @@ # individually through ceph::mon. # Optional. String like e.g. 'a, b, c'. # +# [*ms_bind_ipv6*] Enables Ceph daemons to bind to IPv6 addresses. +# Optional. Boolean. Default provided by Ceph. +# # [*require_signatures*] If Ceph requires signatures on all # message traffic (client<->cluster and between cluster daemons). # Optional. Boolean. Default provided by Ceph. @@ -103,6 +106,7 @@ $mon_osd_nearfull_ratio = undef, $mon_initial_members = undef, $mon_host = undef, + $ms_bind_ipv6 = undef, $require_signatures = undef, $cluster_require_signatures = undef, $service_require_signatures = undef, @@ -133,6 +137,7 @@ 'global/mon_osd_nearfull_ratio': value => $mon_osd_nearfull_ratio; 'global/mon_initial_members': value => $mon_initial_members; 'global/mon_host': value => $mon_host; + 'global/ms_bind_ipv6': value => $ms_bind_ipv6; 'global/require_signatures': value => $require_signatures; 'global/cluster_require_signatures': value => $cluster_require_signatures; 'global/service_require_signatures': value => $service_require_signatures; diff --git a/ceph/manifests/profile/base.pp b/ceph/manifests/profile/base.pp index 12d60f07f..8f9d5cf49 100644 --- a/ceph/manifests/profile/base.pp +++ b/ceph/manifests/profile/base.pp @@ -39,6 +39,7 @@ osd_pool_default_min_size => $ceph::profile::params::osd_pool_default_min_size, mon_initial_members => $ceph::profile::params::mon_initial_members, mon_host => $ceph::profile::params::mon_host, + ms_bind_ipv6 => $ceph::profile::params::ms_bind_ipv6, cluster_network => $ceph::profile::params::cluster_network, public_network => $ceph::profile::params::public_network, } diff --git a/ceph/manifests/profile/params.pp b/ceph/manifests/profile/params.pp index 6a11b494d..c79efe234 100644 --- a/ceph/manifests/profile/params.pp +++ b/ceph/manifests/profile/params.pp @@ -38,6 +38,9 @@ # individually through ceph::mon. # Optional. String like e.g. 'a, b, c'. # +# [*ms_bind_ipv6*] Enables Ceph daemons to bind to IPv6 addresses. +# Optional. Boolean. Default provided by Ceph. +# # [*osd_pool_default_pg_num*] The default number of PGs per pool. # Optional. Integer. Default provided by Ceph. # @@ -95,6 +98,7 @@ $authentication_type = undef, $mon_initial_members = undef, $mon_host = undef, + $ms_bind_ipv6 = undef, $osd_pool_default_pg_num = undef, $osd_pool_default_pgp_num = undef, $osd_pool_default_size = undef, diff --git a/ceph/manifests/repo.pp b/ceph/manifests/repo.pp index 45662b0d7..c01c2ebb7 100644 --- a/ceph/manifests/repo.pp +++ b/ceph/manifests/repo.pp @@ -2,6 +2,7 @@ # Copyright (C) 2013 Cloudwatt # Copyright (C) 2014 Nine Internet Solutions AG # Copyright (C) 2014 Catalyst IT Limited +# Copyright (C) 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
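Stepping back to the ms_bind_ipv6 parameter introduced in ceph and ceph::profile::params above, a minimal sketch of enabling it; the fsid value is a placeholder, and the parameter simply ends up as global/ms_bind_ipv6 via ceph_config:

  class { '::ceph':
    fsid         => '433ad274-27b9-4df9-a3cd-ee9ad26b8643', # placeholder cluster id
    ms_bind_ipv6 => true,
  }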
@@ -21,6 +22,7 @@ # Author: Andrew Woodward # Author: David Gurtner # Author: Ricardo Rocha +# Author: Emilien Macchi # # == Class: ceph::repo # @@ -53,7 +55,7 @@ apt::key { 'ceph': ensure => $ensure, key => '17ED316D', - key_source => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + key_source => 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/release.asc', } apt::source { 'ceph': @@ -98,17 +100,34 @@ 'RedHat': { $enabled = $ensure ? { 'present' => '1', 'absent' => '0', default => absent, } - yumrepo { 'ext-epel-6.8': + + if ((($::operatingsystem == 'RedHat' or $::operatingsystem == 'CentOS') and (versioncmp($::operatingsystemmajrelease, '7') < 0)) or ($::operatingsystem == 'Fedora' and (versioncmp($::operatingsystemmajrelease, '19') < 0))) { + $el = '6' + } else { + $el = '7' + } + + if ($::operatingsystem == 'CentOS') { + file_line { 'exclude base': + ensure => $ensure, + path => '/etc/yum.repos.d/CentOS-Base.repo', + after => '^\[base\]$', + line => 'exclude=python-ceph-compat python-rbd python-rados python-cephfs', + } -> Package<| tag == 'ceph' |> + } + + yumrepo { "ext-epel-${el}": # puppet versions prior to 3.5 do not support ensure, use enabled instead enabled => $enabled, - descr => 'External EPEL 6.8', - name => 'ext-epel-6.8', + descr => "External EPEL ${el}", + name => "ext-epel-${el}", baseurl => absent, gpgcheck => '0', gpgkey => absent, - mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch', + mirrorlist => "http://mirrors.fedoraproject.org/metalink?repo=epel-${el}&arch=\$basearch", priority => '20', # prefer ceph repos over EPEL tag => 'ceph', + exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', } yumrepo { 'ext-ceph': @@ -116,7 +135,7 @@ enabled => $enabled, descr => "External Ceph ${release}", name => "ext-ceph-${release}", - baseurl => "http://ceph.com/rpm-${release}/el6/\$basearch", + baseurl => "http://ceph.com/rpm-${release}/el${el}/\$basearch", gpgcheck => '1', gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', mirrorlist => absent, @@ -129,7 +148,7 @@ enabled => $enabled, descr => 'External Ceph noarch', name => "ext-ceph-${release}-noarch", - baseurl => "http://ceph.com/rpm-${release}/el6/noarch", + baseurl => "http://ceph.com/rpm-${release}/el${el}/noarch", gpgcheck => '1', gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', mirrorlist => absent, @@ -137,7 +156,7 @@ tag => 'ceph', } - if $extras { + if $extras and $el == '6' { yumrepo { 'ext-ceph-extras': enabled => $enabled, @@ -159,7 +178,7 @@ enabled => $enabled, descr => 'FastCGI basearch packages for Ceph', name => 'ext-ceph-fastcgi', - baseurl => 'http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel6-x86_64-basic/ref/master', + baseurl => "http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel${el}-x86_64-basic/ref/master", gpgcheck => '1', gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', mirrorlist => absent, diff --git a/ceph/manifests/rgw.pp b/ceph/manifests/rgw.pp index 727b93972..7fad377ab 100644 --- a/ceph/manifests/rgw.pp +++ b/ceph/manifests/rgw.pp @@ -24,7 +24,7 @@ # # === Parameters: # -# [*pgk_cephrgw*] Package name for the ceph radosgw. +# [*pkg_radosgw*] Package name for the ceph radosgw. # Optional. Default is osfamily dependent (check ceph::params). # # [*rgw_data*] The path where the radosgw data should be stored. 
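For reference on the ceph::repo changes above: the versioncmp logic resolves $el to '7' on EL7 nodes, so a declaration like the sketch below ends up with the ext-epel-7 and rpm-<release>/el7 repositories exercised by the new RHEL7 spec contexts further down (the release value here is only an example of the existing parameter, not something added by this patch):

  class { '::ceph::repo':
    release => 'firefly',   # any supported Ceph release; the specs default to 'giant'
  }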
diff --git a/ceph/metadata.json b/ceph/metadata.json index 989784120..8fee7ef4b 100644 --- a/ceph/metadata.json +++ b/ceph/metadata.json @@ -32,8 +32,8 @@ "description": "Installs and configures Ceph.", "dependencies": [ { "name": "puppetlabs/apt", "version_requirement": ">=1.4.0 <2.0.0" }, - { "name": "puppetlabs/apache", "version_requirement": ">=1.0.1 <2.0.0" }, - { "name": "puppetlabs/concat", "version_requirement": ">=1.1.0 <2.0.0" }, + { "name": "puppetlabs/apache", "version_requirement": ">=1.4.1 <2.0.0" }, + { "name": "puppetlabs/concat", "version_requirement": ">=1.2.1 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" } ] diff --git a/ceph/spec/acceptance/ceph_usecases_spec.rb b/ceph/spec/acceptance/ceph_usecases_spec.rb index 12ca10683..523ce2905 100644 --- a/ceph/spec/acceptance/ceph_usecases_spec.rb +++ b/ceph/spec/acceptance/ceph_usecases_spec.rb @@ -51,13 +51,13 @@ class { 'ceph': shell 'sleep 10' # we need to wait a bit until the OSD is up shell 'ceph -s', { :acceptable_exit_codes => [0] } do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty end shell 'ceph osd tree', { :acceptable_exit_codes => [0] } do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty end end @@ -93,14 +93,14 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=a', { :acceptable_exit_codes => [1] } do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.a/ + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.a/) end end if osfamily == 'RedHat' shell 'service ceph status mon.a', { :acceptable_exit_codes => [1] } do |r| - r.stdout.should =~ /mon.a not found/ - r.stderr.should be_empty + expect(r.stdout).to match(/mon.a not found/) + expect(r.stderr).to be_empty end end end @@ -116,7 +116,9 @@ class { 'ceph::repo': EOS apply_manifest(pp, :catch_failures => true) - apply_manifest(pp, :catch_changes => true) + # can't check for idempotency because of https://tickets.puppetlabs.com/browse/PUP-1198 + #apply_manifest(pp, :catch_changes => true) + apply_manifest(pp, :catch_failures => true) end end end diff --git a/ceph/spec/acceptance/nodesets/centos70-x64.yml b/ceph/spec/acceptance/nodesets/centos70-x64.yml new file mode 100644 index 000000000..b75c1f17d --- /dev/null +++ b/ceph/spec/acceptance/nodesets/centos70-x64.yml @@ -0,0 +1,15 @@ +HOSTS: + first: + roles: + - master + - mon + - osd + - client + platform: el-7-x86_64 + box: puppetlabs/centos-7.0-64-nocm + box_url: https://atlas.hashicorp.com/puppetlabs/boxes/centos-7.0-64-nocm + hypervisor: vagrant + ip: 10.11.12.2 +CONFIG: + type: foss + set_env: false diff --git a/ceph/spec/acceptance/nodesets/nodepool.yml b/ceph/spec/acceptance/nodesets/nodepool-centos7.yml similarity index 76% rename from ceph/spec/acceptance/nodesets/nodepool.yml rename to ceph/spec/acceptance/nodesets/nodepool-centos7.yml index 0765232d5..d52883df5 100644 --- a/ceph/spec/acceptance/nodesets/nodepool.yml +++ b/ceph/spec/acceptance/nodesets/nodepool-centos7.yml @@ -5,8 +5,9 @@ HOSTS: - mon - osd - client - platform: ubuntu-1404-amd64 + platform: el-7-x86_64 hypervisor: none ip: 127.0.0.1 CONFIG: type: foss + set_env: false diff --git a/ceph/spec/acceptance/nodesets/nodepool-trusty.yml 
b/ceph/spec/acceptance/nodesets/nodepool-trusty.yml new file mode 100644 index 000000000..dc22d561a --- /dev/null +++ b/ceph/spec/acceptance/nodesets/nodepool-trusty.yml @@ -0,0 +1,13 @@ +HOSTS: + first: + roles: + - master + - mon + - osd + - client + platform: ubuntu-14.04-amd64 + hypervisor: none + ip: 127.0.0.1 +CONFIG: + type: foss + set_env: false diff --git a/ceph/spec/classes/ceph_init_spec.rb b/ceph/spec/classes/ceph_init_spec.rb index 82559368d..49f309cfe 100644 --- a/ceph/spec/classes/ceph_init_spec.rb +++ b/ceph/spec/classes/ceph_init_spec.rb @@ -41,6 +41,7 @@ it { is_expected.to_not contain_ceph_config('global/mon_osd_nearfull_ratio').with_value('85') } it { is_expected.to_not contain_ceph_config('global/mon_initial_members').with_value('mon.01') } it { is_expected.to_not contain_ceph_config('global/mon_host').with_value('mon01.ceph, mon02.ceph') } + it { is_expected.to_not contain_ceph_config('global/ms_bind_ipv6').with_value('false') } it { is_expected.to_not contain_ceph_config('global/require_signatures').with_value('false') } it { is_expected.to_not contain_ceph_config('global/cluster_require_signatures').with_value('false') } it { is_expected.to_not contain_ceph_config('global/service_require_signatures').with_value('false') } @@ -72,6 +73,7 @@ :mon_osd_nearfull_ratio => '90', :mon_initial_members => 'mon.01', :mon_host => 'mon01.ceph, mon02.ceph', + :ms_bind_ipv6 => 'true', :require_signatures => 'true', :cluster_require_signatures => 'true', :service_require_signatures => 'true', @@ -96,6 +98,7 @@ it { is_expected.to contain_ceph_config('global/mon_osd_nearfull_ratio').with_value('90') } it { is_expected.to contain_ceph_config('global/mon_initial_members').with_value('mon.01') } it { is_expected.to contain_ceph_config('global/mon_host').with_value('mon01.ceph, mon02.ceph') } + it { is_expected.to contain_ceph_config('global/ms_bind_ipv6').with_value('true') } it { is_expected.to contain_ceph_config('global/require_signatures').with_value('true') } it { is_expected.to contain_ceph_config('global/cluster_require_signatures').with_value('true') } it { is_expected.to contain_ceph_config('global/service_require_signatures').with_value('true') } diff --git a/ceph/spec/classes/ceph_mons_spec.rb b/ceph/spec/classes/ceph_mons_spec.rb index a46b6dbad..a21e5b46b 100644 --- a/ceph/spec/classes/ceph_mons_spec.rb +++ b/ceph/spec/classes/ceph_mons_spec.rb @@ -68,11 +68,22 @@ it_configures 'ceph mons' end - describe 'RedHat' do + describe 'RHEL6' do let :facts do { - :osfamily => 'RedHat', - :operatingsystem => 'RedHat', + :osfamily => 'RedHat', + :operatingsystemmajrelease => '6', + } + end + + it_configures 'ceph mons' + end + + describe 'RHEL7' do + let :facts do + { + :osfamily => 'RedHat', + :operatingsystemmajrelease => '7', } end diff --git a/ceph/spec/classes/ceph_profile_base_spec.rb b/ceph/spec/classes/ceph_profile_base_spec.rb index f1c1d9b9e..cdf4bf414 100644 --- a/ceph/spec/classes/ceph_profile_base_spec.rb +++ b/ceph/spec/classes/ceph_profile_base_spec.rb @@ -63,7 +63,18 @@ context 'on RHEL6' do let :facts do - { :osfamily => 'RedHat', } + { :osfamily => 'RedHat', + :operatingsystemmajrelease => '6' } + end + + it_configures 'ceph profile base' + end + + context 'on RHEL7' do + + let :facts do + { :osfamily => 'RedHat', + :operatingsystemmajrelease => '7' } end it_configures 'ceph profile base' diff --git a/ceph/spec/classes/ceph_profile_client_spec.rb b/ceph/spec/classes/ceph_profile_client_spec.rb index f5483ec3a..c830b4868 100644 --- 
a/ceph/spec/classes/ceph_profile_client_spec.rb +++ b/ceph/spec/classes/ceph_profile_client_spec.rb @@ -118,12 +118,22 @@ context 'on RHEL6' do let :facts do - { - :osfamily => 'RedHat', - :operatingsystem => 'RHEL6', - } + { :osfamily => 'RedHat', + :operatingsystemmajrelease => '6' } end + + it_configures 'ceph profile client' + end + + context 'on RHEL7' do + + let :facts do + { :osfamily => 'RedHat', + :operatingsystemmajrelease => '7' } + end + + it_configures 'ceph profile client' end end diff --git a/ceph/spec/classes/ceph_profile_mon_spec.rb b/ceph/spec/classes/ceph_profile_mon_spec.rb index b7de0bbd3..33bea69d3 100644 --- a/ceph/spec/classes/ceph_profile_mon_spec.rb +++ b/ceph/spec/classes/ceph_profile_mon_spec.rb @@ -92,11 +92,20 @@ context 'on RHEL6' do let :facts do - { - :osfamily => 'RedHat', - :operatingsystem => 'RHEL6', - :hostname => 'first', - } + { :osfamily => 'RedHat', + :hostname => 'first', + :operatingsystemmajrelease => '6' } + end + + it_configures 'ceph profile mon' + end + + context 'on RHEL7' do + + let :facts do + { :osfamily => 'RedHat', + :hostname => 'first', + :operatingsystemmajrelease => '7' } end it_configures 'ceph profile mon' diff --git a/ceph/spec/classes/ceph_profile_osd_spec.rb b/ceph/spec/classes/ceph_profile_osd_spec.rb index c4979d4b6..0b309bdd3 100644 --- a/ceph/spec/classes/ceph_profile_osd_spec.rb +++ b/ceph/spec/classes/ceph_profile_osd_spec.rb @@ -68,13 +68,21 @@ it_configures 'ceph profile osd' end - describe 'on RedHat' do + describe 'on RHEL6' do let :facts do - { - :osfamily => 'RedHat', - :operatingsystem => 'RHEL6', - } + { :osfamily => 'RedHat', + :operatingsystemmajrelease => '6' } + end + + it_configures 'ceph profile osd' + end + + describe 'on RHEL7' do + + let :facts do + { :osfamily => 'RedHat', + :operatingsystemmajrelease => '7' } end it_configures 'ceph profile osd' diff --git a/ceph/spec/classes/ceph_repo_spec.rb b/ceph/spec/classes/ceph_repo_spec.rb index 5181e8e67..c10e4442c 100644 --- a/ceph/spec/classes/ceph_repo_spec.rb +++ b/ceph/spec/classes/ceph_repo_spec.rb @@ -34,7 +34,7 @@ it { is_expected.to contain_apt__key('ceph').with( :key => '17ED316D', - :key_source => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' + :key_source => 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/release.asc' ) } it { is_expected.to contain_apt__source('ceph').with( @@ -75,7 +75,7 @@ it { is_expected.to contain_apt__key('ceph').with( :key => '17ED316D', - :key_source => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' + :key_source => 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/release.asc' ) } it { is_expected.to contain_apt__source('ceph').with( @@ -187,21 +187,24 @@ let :facts do { - :osfamily => 'RedHat', + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + :operatingsystemmajrelease => '6', } end describe "with default params" do - it { is_expected.to contain_yumrepo('ext-epel-6.8').with( + it { is_expected.to contain_yumrepo('ext-epel-6').with( :enabled => '1', - :descr => 'External EPEL 6.8', - :name => 'ext-epel-6.8', + :descr => 'External EPEL 6', + :name => 'ext-epel-6', :baseurl => 'absent', :gpgcheck => '0', :gpgkey => 'absent', :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch', - :priority => '20' + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', ) } it { is_expected.to contain_yumrepo('ext-ceph').with( @@ -234,15 +237,16 @@ } end - it { is_expected.to contain_yumrepo('ext-epel-6.8').with( 
+ it { is_expected.to contain_yumrepo('ext-epel-6').with( :enabled => '1', - :descr => 'External EPEL 6.8', - :name => 'ext-epel-6.8', + :descr => 'External EPEL 6', + :name => 'ext-epel-6', :baseurl => 'absent', :gpgcheck => '0', :gpgkey => 'absent', :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch', - :priority => '20' + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', ) } it { is_expected.to contain_yumrepo('ext-ceph').with( @@ -277,15 +281,16 @@ } end - it { is_expected.to contain_yumrepo('ext-epel-6.8').with( + it { is_expected.to contain_yumrepo('ext-epel-6').with( :enabled => '0', - :descr => 'External EPEL 6.8', - :name => 'ext-epel-6.8', + :descr => 'External EPEL 6', + :name => 'ext-epel-6', :baseurl => 'absent', :gpgcheck => '0', :gpgkey => 'absent', :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch', - :priority => '20' + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', ) } it { is_expected.to contain_yumrepo('ext-ceph').with( @@ -341,15 +346,16 @@ } end - it { is_expected.to contain_yumrepo('ext-epel-6.8').with( + it { is_expected.to contain_yumrepo('ext-epel-6').with( :enabled => '1', - :descr => 'External EPEL 6.8', - :name => 'ext-epel-6.8', + :descr => 'External EPEL 6', + :name => 'ext-epel-6', :baseurl => 'absent', :gpgcheck => '0', :gpgkey => 'absent', :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch', - :priority => '20' + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', ) } it { is_expected.to contain_yumrepo('ext-ceph').with( @@ -394,15 +400,16 @@ } end - it { is_expected.to contain_yumrepo('ext-epel-6.8').with( + it { is_expected.to contain_yumrepo('ext-epel-6').with( :enabled => '1', - :descr => 'External EPEL 6.8', - :name => 'ext-epel-6.8', + :descr => 'External EPEL 6', + :name => 'ext-epel-6', :baseurl => 'absent', :gpgcheck => '0', :gpgkey => 'absent', :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch', - :priority => '20' + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', ) } it { is_expected.to contain_yumrepo('ext-ceph').with( @@ -441,4 +448,250 @@ end end + describe 'RHEL7' do + + let :facts do + { + :osfamily => 'RedHat', + :operatingsystem => 'RedHat', + :operatingsystemmajrelease => '7', + } + end + + describe "with default params" do + + it { should contain_yumrepo('ext-epel-7').with( + :enabled => '1', + :descr => 'External EPEL 7', + :name => 'ext-epel-7', + :baseurl => 'absent', + :gpgcheck => '0', + :gpgkey => 'absent', + :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch', + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', + ) } + + it { should contain_yumrepo('ext-ceph').with( + :enabled => '1', + :descr => 'External Ceph giant', + :name => 'ext-ceph-giant', + :baseurl => 'http://ceph.com/rpm-giant/el7/$basearch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should contain_yumrepo('ext-ceph-noarch').with( + :enabled => '1', + :descr => 'External Ceph noarch', + :name => 'ext-ceph-giant-noarch', + :baseurl => 'http://ceph.com/rpm-giant/el7/noarch', + :gpgcheck => '1', + :gpgkey => 
'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + end + + describe "when overriding ceph release" do + let :params do + { + :release => 'firefly' + } + end + + it { should contain_yumrepo('ext-epel-7').with( + :enabled => '1', + :descr => 'External EPEL 7', + :name => 'ext-epel-7', + :baseurl => 'absent', + :gpgcheck => '0', + :gpgkey => 'absent', + :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch', + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', + ) } + + it { should contain_yumrepo('ext-ceph').with( + :enabled => '1', + :descr => 'External Ceph firefly', + :name => 'ext-ceph-firefly', + :baseurl => 'http://ceph.com/rpm-firefly/el7/$basearch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should contain_yumrepo('ext-ceph-noarch').with( + :enabled => '1', + :descr => 'External Ceph noarch', + :name => 'ext-ceph-firefly-noarch', + :baseurl => 'http://ceph.com/rpm-firefly/el7/noarch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + end + + describe "with ensure => absent to disable" do + let :params do + { + :ensure => 'absent', + :extras => true, + :fastcgi => true + } + end + + it { should contain_yumrepo('ext-epel-7').with( + :enabled => '0', + :descr => 'External EPEL 7', + :name => 'ext-epel-7', + :baseurl => 'absent', + :gpgcheck => '0', + :gpgkey => 'absent', + :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch', + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', + ) } + + it { should contain_yumrepo('ext-ceph').with( + :enabled => '0', + :descr => 'External Ceph giant', + :name => 'ext-ceph-giant', + :baseurl => 'http://ceph.com/rpm-giant/el7/$basearch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should contain_yumrepo('ext-ceph-noarch').with( + :enabled => '0', + :descr => 'External Ceph noarch', + :name => 'ext-ceph-giant-noarch', + :baseurl => 'http://ceph.com/rpm-giant/el7/noarch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should_not contain_yumrepo('ext-ceph-extras') } + + it { should contain_yumrepo('ext-ceph-fastcgi').with( + :enabled => '0', + :descr => 'FastCGI basearch packages for Ceph', + :name => 'ext-ceph-fastcgi', + :baseurl => 'http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel7-x86_64-basic/ref/master', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', + :mirrorlist => 'absent', + :priority => '20' + ) } + + end + + describe "with ceph extras" do + let :params do + { + :extras => true + } + end + + it { should contain_yumrepo('ext-epel-7').with( + :enabled => '1', + :descr => 'External EPEL 7', + :name => 'ext-epel-7', + :baseurl => 'absent', + :gpgcheck => '0', + :gpgkey => 'absent', + :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch', + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', + ) } + + it { should contain_yumrepo('ext-ceph').with( + :enabled 
=> '1', + :descr => 'External Ceph giant', + :name => 'ext-ceph-giant', + :baseurl => 'http://ceph.com/rpm-giant/el7/$basearch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should contain_yumrepo('ext-ceph-noarch').with( + :enabled => '1', + :descr => 'External Ceph noarch', + :name => 'ext-ceph-giant-noarch', + :baseurl => 'http://ceph.com/rpm-giant/el7/noarch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should_not contain_yumrepo('ext-ceph-extras') } + end + + describe "with ceph fast-cgi" do + let :params do + { + :fastcgi => true + } + end + + it { should contain_yumrepo('ext-epel-7').with( + :enabled => '1', + :descr => 'External EPEL 7', + :name => 'ext-epel-7', + :baseurl => 'absent', + :gpgcheck => '0', + :gpgkey => 'absent', + :mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch', + :priority => '20', + :exclude => 'python-ceph-compat python-rbd python-rados python-cephfs', + ) } + + it { should contain_yumrepo('ext-ceph').with( + :enabled => '1', + :descr => 'External Ceph giant', + :name => 'ext-ceph-giant', + :baseurl => 'http://ceph.com/rpm-giant/el7/$basearch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should contain_yumrepo('ext-ceph-noarch').with( + :enabled => '1', + :descr => 'External Ceph noarch', + :name => 'ext-ceph-giant-noarch', + :baseurl => 'http://ceph.com/rpm-giant/el7/noarch', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc', + :mirrorlist => 'absent', + :priority => '10' + ) } + + it { should contain_yumrepo('ext-ceph-fastcgi').with( + :enabled => '1', + :descr => 'FastCGI basearch packages for Ceph', + :name => 'ext-ceph-fastcgi', + :baseurl => 'http://gitbuilder.ceph.com/mod_fastcgi-rpm-rhel7-x86_64-basic/ref/master', + :gpgcheck => '1', + :gpgkey => 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc', + :mirrorlist => 'absent', + :priority => '20' + ) } + + end + end + end diff --git a/ceph/spec/defines/ceph_key_spec.rb b/ceph/spec/defines/ceph_key_spec.rb index e614ecd49..38be3d543 100644 --- a/ceph/spec/defines/ceph_key_spec.rb +++ b/ceph/spec/defines/ceph_key_spec.rb @@ -40,15 +40,15 @@ end it { - should contain_exec('ceph-key-client.admin').with( + is_expected.to contain_exec('ceph-key-client.admin').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements\nset -ex\nceph-authtool /etc/ceph/ceph.client.admin.keyring --name 'client.admin' --add-key 'supersecret' --cap mon 'allow *' --cap osd 'allow rw' " ) - should contain_file('/etc/ceph/ceph.client.admin.keyring').with( + is_expected.to contain_file('/etc/ceph/ceph.client.admin.keyring').with( 'owner' => 'nobody', 'group' => 'nogroup', 'mode' => '0600' ) - should contain_exec('ceph-injectkey-client.admin').with( + is_expected.to contain_exec('ceph-injectkey-client.admin').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements\nset -ex\nceph auth add client.admin --in-file=/etc/ceph/ceph.client.admin.keyring" ) } diff --git a/ceph/spec/defines/ceph_mon_spec.rb b/ceph/spec/defines/ceph_mon_spec.rb index c74ef50fc..5c4656f32 100644 --- a/ceph/spec/defines/ceph_mon_spec.rb +++ b/ceph/spec/defines/ceph_mon_spec.rb 
@@ -39,7 +39,7 @@
     it {
       expect {
-        should contain_service('ceph-mon-A').with('ensure' => 'running')
+        is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running')
       }.to raise_error(Puppet::Error, /authentication_type cephx requires either key or keyring to be set but both are undef/)
     }
   end
@@ -56,13 +56,13 @@
     }
   end
-    it { should contain_service('ceph-mon-A').with('ensure' => 'running') }
-    it { should contain_exec('ceph-mon-ceph.client.admin.keyring-A').with(
+    it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running') }
+    it { is_expected.to contain_exec('ceph-mon-ceph.client.admin.keyring-A').with(
       'command' => '/bin/true # comment to satisfy puppet syntax requirements
 set -ex
 touch /etc/ceph/ceph.client.admin.keyring'
     ) }
-    it { should contain_exec('ceph-mon-mkfs-A').with(
+    it { is_expected.to contain_exec('ceph-mon-mkfs-A').with(
       'command' => "/bin/true # comment to satisfy puppet syntax requirements
 set -ex
 mon_data=\$(ceph-mon --id A --show-config-value mon_data)
@@ -80,10 +80,10 @@
 fi
 ", 'logoutput' => true) }
-    it { should contain_file('/tmp/ceph-mon-keyring-A').with(
+    it { is_expected.to contain_file('/tmp/ceph-mon-keyring-A').with(
       'mode' => '0444',
       'content' => "[mon.]\n\tkey = AQATGHJTUCBqIBAA7M2yafV1xctn1pgr3GcKPg==\n\tcaps mon = \"allow *\"\n") }
-    it { should contain_exec('rm-keyring-A').with('command' => '/bin/rm /tmp/ceph-mon-keyring-A') }
+    it { is_expected.to contain_exec('rm-keyring-A').with('command' => '/bin/rm /tmp/ceph-mon-keyring-A') }
   end
   describe 'with keyring' do
@@ -98,13 +98,13 @@
     }
   end
-    it { should contain_service('ceph-mon-A').with('ensure' => 'running') }
-    it { should contain_exec('ceph-mon-ceph.client.admin.keyring-A').with(
+    it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running') }
+    it { is_expected.to contain_exec('ceph-mon-ceph.client.admin.keyring-A').with(
       'command' => '/bin/true # comment to satisfy puppet syntax requirements
 set -ex
 touch /etc/ceph/ceph.client.admin.keyring'
     ) }
-    it { should contain_exec('ceph-mon-mkfs-A').with(
+    it { is_expected.to contain_exec('ceph-mon-mkfs-A').with(
       'command' => "/bin/true # comment to satisfy puppet syntax requirements
 set -ex
 mon_data=\$(ceph-mon --id A --show-config-value mon_data)
@@ -138,13 +138,13 @@
     }
   end
-    it { should contain_service('ceph-mon-A').with('ensure' => 'running') }
-    it { should contain_exec('ceph-mon-testcluster.client.admin.keyring-A').with(
+    it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running') }
+    it { is_expected.to contain_exec('ceph-mon-testcluster.client.admin.keyring-A').with(
       'command' => '/bin/true # comment to satisfy puppet syntax requirements
 set -ex
 touch /etc/ceph/testcluster.client.admin.keyring'
     ) }
-    it { should contain_exec('ceph-mon-mkfs-A').with(
+    it { is_expected.to contain_exec('ceph-mon-mkfs-A').with(
       'command' => "/bin/true # comment to satisfy puppet syntax requirements
 set -ex
 mon_data=\$(ceph-mon --cluster testcluster --id A --show-config-value mon_data)
@@ -179,8 +179,8 @@
     }
   end
-    it { should contain_service('ceph-mon-A').with('ensure' => 'stopped') }
-    it { should contain_exec('remove-mon-A').with(
+    it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'stopped') }
+    it { is_expected.to contain_exec('remove-mon-A').with(
       'command' => "/bin/true # comment to satisfy puppet syntax requirements
 set -ex
 mon_data=\$(ceph-mon --cluster testcluster --id A --show-config-value mon_data)
@@ -196,12 +196,12 @@
   end
   end
-  context 'RHEL6' do
+  context 'RHEL7' do
     let :facts do
       {
:osfamily => 'RedHat', - :operatingsystem => 'RHEL6', + :operatingsystem => 'RHEL7', } end @@ -213,7 +213,7 @@ it { expect { - should contain_service('ceph-mon-A').with('ensure' => 'running') + is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running') }.to raise_error(Puppet::Error, /authentication_type cephx requires either key or keyring to be set but both are undef/) } end @@ -230,13 +230,13 @@ } end - it { should contain_service('ceph-mon-A').with('ensure' => 'running') } - it { should contain_exec('ceph-mon-ceph.client.admin.keyring-A').with( + it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running') } + it { is_expected.to contain_exec('ceph-mon-ceph.client.admin.keyring-A').with( 'command' => '/bin/true # comment to satisfy puppet syntax requirements set -ex touch /etc/ceph/ceph.client.admin.keyring' ) } - it { should contain_exec('ceph-mon-mkfs-A').with( + it { is_expected.to contain_exec('ceph-mon-mkfs-A').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex mon_data=\$(ceph-mon --id A --show-config-value mon_data) @@ -254,10 +254,10 @@ fi ", 'logoutput' => true) } - it { should contain_file('/tmp/ceph-mon-keyring-A').with( + it { is_expected.to contain_file('/tmp/ceph-mon-keyring-A').with( 'mode' => '0444', 'content' => "[mon.]\n\tkey = AQATGHJTUCBqIBAA7M2yafV1xctn1pgr3GcKPg==\n\tcaps mon = \"allow *\"\n") } - it { should contain_exec('rm-keyring-A').with('command' => '/bin/rm /tmp/ceph-mon-keyring-A') } + it { is_expected.to contain_exec('rm-keyring-A').with('command' => '/bin/rm /tmp/ceph-mon-keyring-A') } end describe 'with keyring' do @@ -272,13 +272,13 @@ } end - it { should contain_service('ceph-mon-A').with('ensure' => 'running') } - it { should contain_exec('ceph-mon-ceph.client.admin.keyring-A').with( + it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running') } + it { is_expected.to contain_exec('ceph-mon-ceph.client.admin.keyring-A').with( 'command' => '/bin/true # comment to satisfy puppet syntax requirements set -ex touch /etc/ceph/ceph.client.admin.keyring' ) } - it { should contain_exec('ceph-mon-mkfs-A').with( + it { is_expected.to contain_exec('ceph-mon-mkfs-A').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex mon_data=\$(ceph-mon --id A --show-config-value mon_data) @@ -312,13 +312,13 @@ } end - it { should contain_service('ceph-mon-A').with('ensure' => 'running') } - it { should contain_exec('ceph-mon-testcluster.client.admin.keyring-A').with( + it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'running') } + it { is_expected.to contain_exec('ceph-mon-testcluster.client.admin.keyring-A').with( 'command' => '/bin/true # comment to satisfy puppet syntax requirements set -ex touch /etc/ceph/testcluster.client.admin.keyring' ) } - it { should contain_exec('ceph-mon-mkfs-A').with( + it { is_expected.to contain_exec('ceph-mon-mkfs-A').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex mon_data=\$(ceph-mon --cluster testcluster --id A --show-config-value mon_data) @@ -353,8 +353,8 @@ } end - it { should contain_service('ceph-mon-A').with('ensure' => 'stopped') } - it { should contain_exec('remove-mon-A').with( + it { is_expected.to contain_service('ceph-mon-A').with('ensure' => 'stopped') } + it { is_expected.to contain_exec('remove-mon-A').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex mon_data=\$(ceph-mon --cluster testcluster --id A --show-config-value 
mon_data) diff --git a/ceph/spec/defines/ceph_osd_spec.rb b/ceph/spec/defines/ceph_osd_spec.rb index 2a4895e36..c66fcfbb8 100644 --- a/ceph/spec/defines/ceph_osd_spec.rb +++ b/ceph/spec/defines/ceph_osd_spec.rb @@ -29,7 +29,7 @@ '/tmp' end - it { should contain_exec('ceph-osd-prepare-/tmp').with( + it { is_expected.to contain_exec('ceph-osd-prepare-/tmp').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex if ! test -b /tmp ; then @@ -45,7 +45,7 @@ ", 'logoutput' => true ) } - it { should contain_exec('ceph-osd-activate-/tmp').with( + it { is_expected.to contain_exec('ceph-osd-activate-/tmp').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex if ! test -b /tmp ; then @@ -78,7 +78,7 @@ } end - it { should contain_exec('ceph-osd-prepare-/tmp/data').with( + it { is_expected.to contain_exec('ceph-osd-prepare-/tmp/data').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex if ! test -b /tmp/data ; then @@ -94,7 +94,7 @@ ", 'logoutput' => true ) } - it { should contain_exec('ceph-osd-activate-/tmp/data').with( + it { is_expected.to contain_exec('ceph-osd-activate-/tmp/data').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex if ! test -b /tmp/data ; then @@ -126,7 +126,7 @@ } end - it { should contain_exec('remove-osd-/tmp').with( + it { is_expected.to contain_exec('remove-osd-/tmp').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements set -ex if [ -z \"\$id\" ] ; then diff --git a/ceph/spec/defines/ceph_pool_spec.rb b/ceph/spec/defines/ceph_pool_spec.rb index ff85173b4..97e171588 100644 --- a/ceph/spec/defines/ceph_pool_spec.rb +++ b/ceph/spec/defines/ceph_pool_spec.rb @@ -37,19 +37,19 @@ end it { - should contain_exec('create-volumes').with( + is_expected.to contain_exec('create-volumes').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements\nset -ex\nceph osd pool create volumes 3" ) - should contain_exec('set-volumes-pg_num').with( + is_expected.to contain_exec('set-volumes-pg_num').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements\nset -ex\nceph osd pool set volumes pg_num 3" ) - should contain_exec('set-volumes-pgp_num').with( + is_expected.to contain_exec('set-volumes-pgp_num').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements\nset -ex\nceph osd pool set volumes pgp_num 4" ) - should contain_exec('set-volumes-size').with( + is_expected.to contain_exec('set-volumes-size').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements\nset -ex\nceph osd pool set volumes size 2" ) - should_not contain_exec('delete-volumes') + is_expected.not_to contain_exec('delete-volumes') } end @@ -67,8 +67,8 @@ end it { - should_not contain_exec('create-volumes') - should contain_exec('delete-volumes').with( + is_expected.not_to contain_exec('create-volumes') + is_expected.to contain_exec('delete-volumes').with( 'command' => "/bin/true # comment to satisfy puppet syntax requirements\nset -ex\nceph osd pool delete volumes volumes --yes-i-really-really-mean-it" ) } diff --git a/ceph/spec/defines/ceph_rgw_apache_spec.rb b/ceph/spec/defines/ceph_rgw_apache_spec.rb index e1450074c..1d65e9332 100644 --- a/ceph/spec/defines/ceph_rgw_apache_spec.rb +++ b/ceph/spec/defines/ceph_rgw_apache_spec.rb @@ -49,7 +49,7 @@ class { 'ceph::repo': 'radosgw.gateway' end - it { should contain_apache__vhost('myhost.domain-radosgw').with( { + it { is_expected.to 
contain_apache__vhost('myhost.domain-radosgw').with( { 'servername' => 'myhost.domain', 'serveradmin' => 'root@localhost', 'port' => 80, @@ -63,14 +63,14 @@ class { 'ceph::repo': ServerSignature Off", })} - it { should contain_class('apache') } - it { should contain_class('apache::mod::alias') } - it { should contain_class('apache::mod::auth_basic') } - it { should contain_apache__mod('fastcgi').with ( { 'package' => 'libapache2-mod-fastcgi' } ) } - it { should contain_class('apache::mod::mime') } - it { should contain_class('apache::mod::rewrite') } + it { is_expected.to contain_class('apache') } + it { is_expected.to contain_class('apache::mod::alias') } + it { is_expected.to contain_class('apache::mod::auth_basic') } + it { is_expected.to contain_apache__mod('fastcgi').with ( { 'package' => 'libapache2-mod-fastcgi' } ) } + it { is_expected.to contain_class('apache::mod::mime') } + it { is_expected.to contain_class('apache::mod::rewrite') } - it { should contain_file('/var/www/s3gw.fcgi').with({ + it { is_expected.to contain_file('/var/www/s3gw.fcgi').with({ 'ensure' => 'file', 'owner' => 'root', 'group' => 'root', @@ -98,7 +98,7 @@ class { 'ceph::repo': } end - it { should contain_apache__vhost('mydns.hostname-radosgw').with( { + it { is_expected.to contain_apache__vhost('mydns.hostname-radosgw').with( { 'servername' => 'mydns.hostname', 'serveradmin' => 'admin@hostname', 'port' => 1111, @@ -112,14 +112,14 @@ class { 'ceph::repo': ServerSignature Off", } ) } - it { should contain_class('apache') } - it { should contain_class('apache::mod::alias') } - it { should contain_class('apache::mod::auth_basic') } - it { should contain_apache__mod('fastcgi').with ( { 'package' => 'libapache2-mod-fastcgi' } ) } - it { should contain_class('apache::mod::mime') } - it { should contain_class('apache::mod::rewrite') } + it { is_expected.to contain_class('apache') } + it { is_expected.to contain_class('apache::mod::alias') } + it { is_expected.to contain_class('apache::mod::auth_basic') } + it { is_expected.to contain_apache__mod('fastcgi').with ( { 'package' => 'libapache2-mod-fastcgi' } ) } + it { is_expected.to contain_class('apache::mod::mime') } + it { is_expected.to contain_class('apache::mod::rewrite') } - it { should contain_file('/some/fcgi/filepath') } + it { is_expected.to contain_file('/some/fcgi/filepath') } end end @@ -144,7 +144,7 @@ class { 'ceph::repo': 'radosgw.gateway' end - it { should contain_apache__vhost('myhost.domain-radosgw').with( { + it { is_expected.to contain_apache__vhost('myhost.domain-radosgw').with( { 'servername' => 'myhost.domain', 'serveradmin' => 'root@localhost', 'port' => 80, @@ -158,14 +158,14 @@ class { 'ceph::repo': ServerSignature Off", })} - it { should contain_class('apache') } - it { should contain_class('apache::mod::alias') } - it { should contain_class('apache::mod::auth_basic') } - it { should contain_apache__mod('fastcgi').with ( { 'package' => 'mod_fastcgi' } ) } - it { should contain_class('apache::mod::mime') } - it { should contain_class('apache::mod::rewrite') } + it { is_expected.to contain_class('apache') } + it { is_expected.to contain_class('apache::mod::alias') } + it { is_expected.to contain_class('apache::mod::auth_basic') } + it { is_expected.to contain_apache__mod('fastcgi').with ( { 'package' => 'mod_fastcgi' } ) } + it { is_expected.to contain_class('apache::mod::mime') } + it { is_expected.to contain_class('apache::mod::rewrite') } - it { should contain_file('/var/www/s3gw.fcgi').with({ + it { is_expected.to 
contain_file('/var/www/s3gw.fcgi').with({ 'ensure' => 'file', 'owner' => 'root', 'group' => 'root', @@ -193,7 +193,7 @@ class { 'ceph::repo': } end - it { should contain_apache__vhost('mydns.hostname-radosgw').with( { + it { is_expected.to contain_apache__vhost('mydns.hostname-radosgw').with( { 'servername' => 'mydns.hostname', 'serveradmin' => 'admin@hostname', 'port' => 1111, @@ -207,14 +207,14 @@ class { 'ceph::repo': ServerSignature Off", } ) } - it { should contain_class('apache') } - it { should contain_class('apache::mod::alias') } - it { should contain_class('apache::mod::auth_basic') } - it { should contain_apache__mod('fastcgi').with ( { 'package' => 'mod_fastcgi' } ) } - it { should contain_class('apache::mod::mime') } - it { should contain_class('apache::mod::rewrite') } + it { is_expected.to contain_class('apache') } + it { is_expected.to contain_class('apache::mod::alias') } + it { is_expected.to contain_class('apache::mod::auth_basic') } + it { is_expected.to contain_apache__mod('fastcgi').with ( { 'package' => 'mod_fastcgi' } ) } + it { is_expected.to contain_class('apache::mod::mime') } + it { is_expected.to contain_class('apache::mod::rewrite') } - it { should contain_file('/some/fcgi/filepath') } + it { is_expected.to contain_file('/some/fcgi/filepath') } end end diff --git a/ceph/spec/defines/ceph_rgw_spec.rb b/ceph/spec/defines/ceph_rgw_spec.rb index a3e3617ff..0fc1c8622 100644 --- a/ceph/spec/defines/ceph_rgw_spec.rb +++ b/ceph/spec/defines/ceph_rgw_spec.rb @@ -31,29 +31,29 @@ 'radosgw.gateway' end - it { should contain_package("#{default_params[:pkg_radosgw]}").with('ensure' => 'installed') } - it { should contain_ceph_config('client.radosgw.gateway/user').with_value("#{default_params[:user]}") } - it { should contain_ceph_config('client.radosgw.gateway/host').with_value('myhost') } - it { should contain_ceph_config('client.radosgw.gateway/keyring').with_value('/etc/ceph/ceph.client.radosgw.gateway.keyring') } - it { should contain_ceph_config('client.radosgw.gateway/log_file').with_value('/var/log/ceph/radosgw.log') } - it { should contain_ceph_config('client.radosgw.gateway/rgw_dns_name').with_value('myhost.domain') } - it { should contain_ceph_config('client.radosgw.gateway/rgw_print_continue').with_value(true) } - it { should contain_ceph_config('client.radosgw.gateway/rgw_socket_path').with_value('/tmp/radosgw.sock') } - it { should contain_ceph_config('client.radosgw.gateway/rgw_port').with_value(80) } - - it { should contain_file('/var/lib/ceph/radosgw').with({ + it { is_expected.to contain_package("#{default_params[:pkg_radosgw]}").with('ensure' => 'installed') } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/user').with_value("#{default_params[:user]}") } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/host').with_value('myhost') } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/keyring').with_value('/etc/ceph/ceph.client.radosgw.gateway.keyring') } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/log_file').with_value('/var/log/ceph/radosgw.log') } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/rgw_dns_name').with_value('myhost.domain') } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/rgw_print_continue').with_value(true) } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/rgw_socket_path').with_value('/tmp/radosgw.sock') } + it { is_expected.to contain_ceph_config('client.radosgw.gateway/rgw_port').with_value(80) } + + it { 
is_expected.to contain_file('/var/lib/ceph/radosgw').with({ 'ensure' => 'directory', 'mode' => '0755', })} - it { should contain_file('/var/lib/ceph/radosgw/ceph-radosgw.gateway').with({ + it { is_expected.to contain_file('/var/lib/ceph/radosgw/ceph-radosgw.gateway').with({ 'ensure' => 'directory', 'owner' => 'root', 'group' => 'root', 'mode' => '0750', })} - it { should contain_service('radosgw-radosgw.gateway') } + it { is_expected.to contain_service('radosgw-radosgw.gateway') } end @@ -78,25 +78,25 @@ } end - it { should contain_package('pkgradosgw').with('ensure' => 'installed') } + it { is_expected.to contain_package('pkgradosgw').with('ensure' => 'installed') } - it { should contain_ceph_config('client.myid/host').with_value('myhost') } - it { should contain_ceph_config('client.myid/keyring').with_value('/etc/ceph/ceph.myid.keyring') } - it { should contain_ceph_config('client.myid/log_file').with_value('/var/log/ceph/mylogfile.log') } - it { should contain_ceph_config('client.myid/rgw_dns_name').with_value('mydns.hostname') } - it { should contain_ceph_config('client.myid/rgw_print_continue').with_value(false) } - it { should contain_ceph_config('client.myid/rgw_socket_path').with_value('/some/location/radosgw.sock') } - it { should contain_ceph_config('client.myid/rgw_port').with_value(1111) } - it { should contain_ceph_config('client.myid/user').with_value('wwwuser') } + it { is_expected.to contain_ceph_config('client.myid/host').with_value('myhost') } + it { is_expected.to contain_ceph_config('client.myid/keyring').with_value('/etc/ceph/ceph.myid.keyring') } + it { is_expected.to contain_ceph_config('client.myid/log_file').with_value('/var/log/ceph/mylogfile.log') } + it { is_expected.to contain_ceph_config('client.myid/rgw_dns_name').with_value('mydns.hostname') } + it { is_expected.to contain_ceph_config('client.myid/rgw_print_continue').with_value(false) } + it { is_expected.to contain_ceph_config('client.myid/rgw_socket_path').with_value('/some/location/radosgw.sock') } + it { is_expected.to contain_ceph_config('client.myid/rgw_port').with_value(1111) } + it { is_expected.to contain_ceph_config('client.myid/user').with_value('wwwuser') } - it { should contain_file('/var/lib/ceph/radosgw/ceph-myid').with( { + it { is_expected.to contain_file('/var/lib/ceph/radosgw/ceph-myid').with( { 'ensure' => 'directory', 'owner' => 'root', 'group' => 'root', 'mode' => '0750', } ) } - it { should contain_service('radosgw-myid') } + it { is_expected.to contain_service('radosgw-myid') } end diff --git a/ceph/spec/spec_helper_acceptance.rb b/ceph/spec/spec_helper_acceptance.rb index f301c819e..e6803f79e 100644 --- a/ceph/spec/spec_helper_acceptance.rb +++ b/ceph/spec/spec_helper_acceptance.rb @@ -24,13 +24,18 @@ hosts.each do |host| install_puppet + # clean out any module cruft + shell('rm -fr /etc/puppet/modules/*') on host, "mkdir -p #{host['distmoduledir']}" + # we will provide our own epel with some excludes later + shell('test -f /etc/debian_version || yum-config-manager --disable epel') end + c.formatter = :documentation + c.before :suite do - puppet_module_install(:source => proj_root, :module_name => 'ceph') - scp_to hosts, File.join(proj_root, 'spec/fixtures/hieradata/hiera.yaml'), '/etc/puppet/hiera.yaml' hosts.each do |host| + scp_to hosts, File.join(proj_root, 'spec/fixtures/hieradata/hiera.yaml'), '/etc/puppet/hiera.yaml' # https://tickets.puppetlabs.com/browse/PUP-2566 on host, 'sed -i "/templatedir/d" /etc/puppet/puppet.conf' install_package host, 'git' @@ -38,8 +43,9 @@ on 
host, puppet('module install puppetlabs/stdlib --version ">=4.0.0 <5.0.0"'), { :acceptable_exit_codes => [0,1] } on host, puppet('module install puppetlabs/inifile --version ">=1.0.0 <2.0.0"'), { :acceptable_exit_codes => [0,1] } on host, puppet('module install puppetlabs/apt --version ">=1.4.0 <2.0.0"'), { :acceptable_exit_codes => [0,1] } - on host, puppet('module install puppetlabs/concat --version ">=1.1.0 <2.0.0"'), { :acceptable_exit_codes => [0,1] } - on host, puppet('module install puppetlabs/apache --version ">=1.0.1 <2.0.0"'), { :acceptable_exit_codes => [0,1] } + on host, puppet('module install puppetlabs/concat --version ">=1.2.1 <2.0.0"'), { :acceptable_exit_codes => [0,1] } + on host, puppet('module install puppetlabs/apache --version ">=1.4.1 <2.0.0"'), { :acceptable_exit_codes => [0,1] } + puppet_module_install(:source => proj_root, :module_name => 'ceph') # Flush the firewall flushfw = <<-EOS iptables -F diff --git a/ceph/spec/spec_helper_system.rb b/ceph/spec/spec_helper_system.rb index ab82e0cb7..3f725cbf9 100644 --- a/ceph/spec/spec_helper_system.rb +++ b/ceph/spec/spec_helper_system.rb @@ -44,15 +44,15 @@ puppet_module_install(:source => File.join(proj_root, '../scenario_node_terminus'), :module_name => 'scenario_node_terminus', :node => vm) - shell(:command => 'puppet module install --version 4.x puppetlabs/stdlib', + shell(:command => 'puppet module install --version ">=4.0.0 <5.0.0" puppetlabs/stdlib', :node => vm) - shell(:command => 'puppet module install --version 1.0.0 puppetlabs/inifile', + shell(:command => 'puppet module install --version ">=1.0.0 <2.0.0" puppetlabs/inifile', :node => vm) - shell(:command => 'puppet module install --version 1.4.0 puppetlabs/apt', + shell(:command => 'puppet module install --version ">=1.4.0 <2.0.0" puppetlabs/apt', :node => vm) - shell(:command => 'puppet module install --version 1.1.x puppetlabs/concat', + shell(:command => 'puppet module install --version ">=1.2.1 <2.0.0" puppetlabs/concat', :node => vm) - shell(:command => 'puppet module install --version 1.0.1 puppetlabs/apache', + shell(:command => 'puppet module install --version ">=1.4.1 <2.0.0" puppetlabs/apache', :node => vm) rcp(:sp => File.join(proj_root, 'spec/fixtures/hieradata/hiera.yaml'), :dp => '/etc/puppet/hiera.yaml', diff --git a/ceph/spec/system/ceph_key_spec.rb b/ceph/spec/system/ceph_key_spec.rb index 8e3fa645a..c18389c8d 100644 --- a/ceph/spec/system/ceph_key_spec.rb +++ b/ceph/spec/system/ceph_key_spec.rb @@ -67,7 +67,7 @@ class { 'ceph::repo': machines.each do |mon| puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -85,7 +85,7 @@ class { 'ceph::repo': machines.each do |mon| puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -117,33 +117,33 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph auth list' do |r| - r.stdout.should_not =~ /client.admin/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/client.admin/) + expect(r.exit_code).to be_zero end shell 'ls -l /etc/ceph/ceph.client.admin.keyring' do |r| - r.stdout.should =~ /.*-rw-------.*root\sroot.*/m - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/.*-rw-------.*root\sroot.*/m) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 
'cat /etc/ceph/ceph.client.admin.keyring' do |r| - r.stdout.should =~ /.*\[client.admin\].*key = #{admin_key}.*caps mds = "allow \*".*caps mon = "allow \*".*caps osd = "allow \*".*/m - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/.*\[client.admin\].*key = #{admin_key}.*caps mds = "allow \*".*caps mon = "allow \*".*caps osd = "allow \*".*/m) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one monitor and all packages' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -192,34 +192,34 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 - r.stdout.should_not =~ /Exec\[ceph-key-client\.admin\]/ # client.admin key needs to contain a / character! + expect(r.exit_code).not_to eq(1) + expect(r.stdout).not_to match(/Exec\[ceph-key-client\.admin\]/) # client.admin key needs to contain a / character! end shell 'ceph auth list' do |r| - r.stdout.should =~ /.*client\.volumes.*key:\s#{volume_key}.*/m + expect(r.stdout).to match(/.*client\.volumes.*key:\s#{volume_key}.*/m) # r.stderr.should be_empty # ceph auth writes to stderr! - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'ls -l /etc/ceph/ceph.client.volumes.keyring' do |r| - r.stdout.should =~ /.*-rw-------.*nobody\s#{nogroup}.*/m - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/.*-rw-------.*nobody\s#{nogroup}.*/m) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'cat /etc/ceph/ceph.client.volumes.keyring' do |r| - r.stdout.should =~ /.*\[client.volumes\].*key = #{volume_key}.*caps mon = "allow \*".*caps osd = "allow rw".*/m - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/.*\[client.volumes\].*key = #{volume_key}.*caps mon = "allow \*".*caps osd = "allow rw".*/m) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one monitor and all packages' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end diff --git a/ceph/spec/system/ceph_mon_spec.rb b/ceph/spec/system/ceph_mon_spec.rb index 8d9939ba5..d0a4c93fd 100644 --- a/ceph/spec/system/ceph_mon_spec.rb +++ b/ceph/spec/system/ceph_mon_spec.rb @@ -40,7 +40,7 @@ class { 'ceph::repo': machines.each do |mon| puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -58,7 +58,7 @@ class { 'ceph::repo': machines.each do |mon| puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -78,15 +78,15 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -98,9 +98,9 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end osfamily = facter.facts['osfamily'] @@ -108,16 +108,16 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' 
shell 'status ceph-mon id=a' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.a/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.a/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.a' do |r| - r.stdout.should =~ /mon.a not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.a not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -137,13 +137,13 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'test -z "$(cat /etc/ceph/ceph.client.admin.keyring)"' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end end @@ -155,9 +155,9 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end osfamily = facter.facts['osfamily'] @@ -165,16 +165,16 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=a' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.a/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.a/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.a' do |r| - r.stdout.should =~ /mon.a not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.a not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -198,13 +198,13 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'test -f /etc/ceph/ceph.client.admin.keyring' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end end @@ -216,9 +216,9 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end osfamily = facter.facts['osfamily'] @@ -226,16 +226,16 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=a' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.a/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.a/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.a' do |r| - r.stdout.should =~ /mon.a not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.a not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -243,7 +243,7 @@ class { 'ceph': describe 'on two hosts' do it 'should be two hosts' do - machines.size.should == 2 + expect(machines.size).to eq(2) end it 'should install two monitors' do @@ -262,16 +262,16 @@ class { 'ceph': EOS puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end shell 'ceph -s' do |r| - r.stdout.should =~ /2 mons .* quorum 0,1/ - 
r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/2 mons .* quorum 0,1/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -284,9 +284,9 @@ class { 'ceph': EOS puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end osfamily = facter.facts['osfamily'] @@ -294,16 +294,16 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell "status ceph-mon id=#{mon}" do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.#{mon}/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.#{mon}/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell "service ceph status mon.#{mon}" do |r| - r.stdout.should =~ /mon.#{mon} not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.#{mon} not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end diff --git a/ceph/spec/system/ceph_mons_spec.rb b/ceph/spec/system/ceph_mons_spec.rb index 745e05815..6894edf99 100644 --- a/ceph/spec/system/ceph_mons_spec.rb +++ b/ceph/spec/system/ceph_mons_spec.rb @@ -117,15 +117,15 @@ end puppet_apply(data_site_pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -146,7 +146,7 @@ end puppet_apply(data_site_pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -154,16 +154,16 @@ if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /status: Unknown job: ceph-mon/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/status: Unknown job: ceph-mon/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /ceph: unrecognized service/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/ceph: unrecognized service/) + expect(r.exit_code).not_to be_zero end end end @@ -201,26 +201,26 @@ end puppet_apply(data_site_pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'cat /etc/ceph/ceph.client.admin.keyring' do |r| - r.stdout.should =~ /#{admin_key}/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph auth list' do |r| - r.stdout.should =~ /#{admin_key}/ - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.exit_code).to be_zero end end @@ -242,7 +242,7 @@ end puppet_apply(data_site_pp) do |r| - r.exit_code.should_not == 
1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -250,16 +250,16 @@ if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /status: Unknown job: ceph-mon/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/status: Unknown job: ceph-mon/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /ceph: unrecognized service/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/ceph: unrecognized service/) + expect(r.exit_code).not_to be_zero end end end @@ -282,7 +282,7 @@ machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end rcp(:sp => snt_data, :dp => data_path, :d => node(:name => vm)) @@ -334,15 +334,15 @@ end puppet_apply('') do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -359,7 +359,7 @@ end puppet_apply('') do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -367,16 +367,16 @@ if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /status: Unknown job: ceph-mon/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/status: Unknown job: ceph-mon/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /ceph: unrecognized service/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/ceph: unrecognized service/) + expect(r.exit_code).not_to be_zero end end end @@ -416,26 +416,26 @@ end puppet_apply('') do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'cat /etc/ceph/ceph.client.admin.keyring' do |r| - r.stdout.should =~ /#{admin_key}/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph auth list' do |r| - r.stdout.should =~ /#{admin_key}/ - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.exit_code).to be_zero end end @@ -452,7 +452,7 @@ end puppet_apply('') do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -460,16 +460,16 @@ if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /status: Unknown job: ceph-mon/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/status: Unknown 
job: ceph-mon/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /ceph: unrecognized service/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/ceph: unrecognized service/) + expect(r.exit_code).not_to be_zero end end end diff --git a/ceph/spec/system/ceph_osd_spec.rb b/ceph/spec/system/ceph_osd_spec.rb index d5e3ebf38..5f67f1eea 100644 --- a/ceph/spec/system/ceph_osd_spec.rb +++ b/ceph/spec/system/ceph_osd_spec.rb @@ -56,7 +56,7 @@ class { 'ceph::repo': machines.each do |mon| puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -74,7 +74,7 @@ class { 'ceph::repo': machines.each do |mon| puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -98,22 +98,22 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -123,20 +123,20 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell "test -b #{data} && ceph-disk zap #{data}" end it 'should uninstall one monitor and all packages' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -172,21 +172,21 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -194,13 +194,13 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell "test -b #{data} && ceph-disk zap #{data}" @@ -208,7 +208,7 @@ class { 'ceph': it 'should uninstall one monitor and all packages' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -233,21 +233,21 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) 
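Note: the system/acceptance hunks in this region apply the same conversion to the result object yielded by the shell helper in these rspec-system/beaker specs: r.exit_code.should_not == 1 becomes expect(r.exit_code).not_to eq(1), r.stdout.should =~ /.../ becomes expect(r.stdout).to match(/.../), and the be_empty / be_zero predicate matchers keep their names inside the expect wrapper. A self-contained sketch of the mapping using plain values in place of the helper's result object (illustrative only, not part of this patch):

    # runs standalone with the rspec gem: ruby expect_syntax_sketch.rb
    require 'rspec/autorun'

    RSpec.describe 'expect-syntax equivalents used in these system specs' do
      let(:exit_code) { 0 }           # stands in for r.exit_code
      let(:stdout)    { "osd.0 up\n" } # stands in for r.stdout
      let(:stderr)    { '' }           # stands in for r.stderr

      it 'r.exit_code.should_not == 1  ->  expect(r.exit_code).not_to eq(1)' do
        expect(exit_code).not_to eq(1)
      end

      it 'r.stdout.should =~ /osd.0/  ->  expect(r.stdout).to match(/osd.0/)' do
        expect(stdout).to match(/osd\.0/)
      end

      it 'r.stderr.should be_empty  ->  expect(r.stderr).to be_empty' do
        expect(stderr).to be_empty
      end

      it 'r.exit_code.should be_zero  ->  expect(r.exit_code).to be_zero' do
        expect(exit_code).to be_zero
      end
    end
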
+ expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd and external journal' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -255,26 +255,26 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell "test -b #{data} && ceph-disk zap #{data}" end it 'should uninstall one monitor and all packages' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end it 'should install one OSD no cephx on a partition' do shell 'sgdisk --largest-new=1 --change-name="1:ceph data" --partition-guid=1:7aebb13f-d4a5-4b94-8622-355d2b5401f1 --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdb' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end pp = <<-EOS @@ -294,21 +294,21 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -318,20 +318,20 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'ceph-disk zap /dev/sdb' end it 'should install one OSD no cephx on partition and activate after umount' do shell 'sgdisk --delete=1 /dev/sdb || true; sgdisk --largest-new=1 --change-name="1:ceph data" --partition-guid=1:7aebb13f-d4a5-4b94-8622-355d2b5401f1 --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdb' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end pp = <<-EOS @@ -351,41 +351,41 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end # stop and umount (but leave it prepared) shell 'stop ceph-osd id=0 || /etc/init.d/ceph stop osd.0; umount /dev/sdb1' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end # rerun puppet (should activate but not prepare) puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end # check osd up and same osd.id shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0\s*up/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0\s*up/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | 
grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -395,20 +395,20 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'ceph-disk zap /dev/sdb' end it 'should uninstall one monitor and all packages' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end diff --git a/ceph/spec/system/ceph_osds_spec.rb b/ceph/spec/system/ceph_osds_spec.rb index 9df41a206..d398d4553 100644 --- a/ceph/spec/system/ceph_osds_spec.rb +++ b/ceph/spec/system/ceph_osds_spec.rb @@ -52,7 +52,7 @@ machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end rcp(:sp => data, :dp => data_path, :d => node(:name => vm)) @@ -114,7 +114,7 @@ machines.each do |vm| puppet_apply('') do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell(:node => vm, :command => 'test -b /dev/sdb && sgdisk --zap-all --clear --mbrtogpt -- /dev/sdb') @@ -148,15 +148,15 @@ end puppet_apply('') do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end end @@ -199,15 +199,15 @@ end puppet_apply('') do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end end diff --git a/ceph/spec/system/ceph_pool_spec.rb b/ceph/spec/system/ceph_pool_spec.rb index a1ca42778..93c8eabbf 100644 --- a/ceph/spec/system/ceph_pool_spec.rb +++ b/ceph/spec/system/ceph_pool_spec.rb @@ -46,7 +46,7 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -66,7 +66,7 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -80,27 +80,27 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end shell 'ceph osd pool get volumes pg_num' do |r| - r.stdout.should =~ /pg_num: 64/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/pg_num: 64/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph osd pool get volumes pgp_num' do |r| - r.stdout.should =~ /pgp_num: 64/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/pgp_num: 64/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph osd pool get volumes size' do |r| - r.stdout.should =~ /size: 3/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/size: 3/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -115,9 +115,9 @@ class { 
'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end pp2 = <<-EOS @@ -127,15 +127,15 @@ class { 'ceph::repo': EOS puppet_apply(pp2) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should == 0 + expect(r.exit_code).to eq(0) end shell 'ceph osd lspools | grep volumes' do |r| - r.stdout.should be_empty - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end diff --git a/ceph/spec/system/ceph_profile_base_spec.rb b/ceph/spec/system/ceph_profile_base_spec.rb index 13a7d9608..4f27becd3 100644 --- a/ceph/spec/system/ceph_profile_base_spec.rb +++ b/ceph/spec/system/ceph_profile_base_spec.rb @@ -54,7 +54,7 @@ class { 'ceph::repo': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -93,21 +93,21 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'cat /etc/ceph/ceph.conf' do |r| - r.stdout.should =~ /#{fsid}/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/#{fsid}/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell querycommand do |r| - r.stdout.should =~ /#{queryresult}/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/#{queryresult}/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end end diff --git a/ceph/spec/system/ceph_profile_client_spec.rb b/ceph/spec/system/ceph_profile_client_spec.rb index 435484c31..ce7d47d71 100644 --- a/ceph/spec/system/ceph_profile_client_spec.rb +++ b/ceph/spec/system/ceph_profile_client_spec.rb @@ -62,7 +62,7 @@ class { 'ceph::repo': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -104,37 +104,37 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph -n client.volumes -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph auth list' do |r| - r.stdout.should =~ /#{admin_key}/ - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.exit_code).to be_zero end shell 'ceph auth list' do |r| - r.stdout.should =~ /#{volumes_key}/ - r.exit_code.should be_zero + expect(r.stdout).to match(/#{volumes_key}/) + expect(r.exit_code).to be_zero end end it 'should uninstall one monitor' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -201,31 +201,31 @@ class { 'ceph::repo': end puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + 
expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end ['first', 'second'].each do |vm| if vm == "first" shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph auth list' do |r| - r.stdout.should =~ /#{admin_key}/ - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.exit_code).to be_zero end end if vm == "second" shell 'ceph -n client.volumes -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end end @@ -234,7 +234,7 @@ class { 'ceph::repo': it 'should uninstall one monitor' do [ 'second', 'first' ].each do |vm| puppet_apply(:node => vm, :code => purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end diff --git a/ceph/spec/system/ceph_profile_mon_spec.rb b/ceph/spec/system/ceph_profile_mon_spec.rb index 69fc2658a..68541fb3b 100644 --- a/ceph/spec/system/ceph_profile_mon_spec.rb +++ b/ceph/spec/system/ceph_profile_mon_spec.rb @@ -48,7 +48,7 @@ class { 'ceph::repo': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -76,15 +76,15 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -96,7 +96,7 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -104,16 +104,16 @@ class { 'ceph::repo': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.first/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.first/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should =~ /mon.first not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.first not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -150,20 +150,20 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph auth list' do |r| - r.stdout.should =~ /#{admin_key}/ - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.exit_code).to be_zero end end @@ -175,7 +175,7 @@ class { 
'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -183,16 +183,16 @@ class { 'ceph::repo': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.first/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.first/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should =~ /mon.first not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.first not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -232,20 +232,20 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ + expect(r.stdout).to match(/1 mons .* quorum 0 first/) - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph auth list' do |r| - r.stdout.should =~ /#{admin_key}/ - r.exit_code.should be_zero + expect(r.stdout).to match(/#{admin_key}/) + expect(r.exit_code).to be_zero end end @@ -257,7 +257,7 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -265,16 +265,16 @@ class { 'ceph::repo': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.first/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.first/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should =~ /mon.first not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.first not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -282,7 +282,7 @@ class { 'ceph::repo': describe 'on two hosts' do it 'should be two hosts' do - machines.size.should == 2 + expect(machines.size).to eq(2) end it 'should install two monitors' do @@ -309,16 +309,16 @@ class { 'ceph::repo': EOS puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end shell 'ceph -s' do |r| - r.stdout.should =~ /2 mons .* quorum 0,1 first,second/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/2 mons .* quorum 0,1 first,second/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -331,7 +331,7 @@ class { 'ceph::repo': EOS puppet_apply(:node => mon, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -339,16 +339,16 @@ class { 'ceph::repo': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell "status ceph-mon id=#{mon}" do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.#{mon}/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + 
expect(r.stderr).to match(/Unknown instance: ceph.#{mon}/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell "service ceph status mon.#{mon}" do |r| - r.stdout.should =~ /mon.#{mon} not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.#{mon} not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end diff --git a/ceph/spec/system/ceph_profile_osd_spec.rb b/ceph/spec/system/ceph_profile_osd_spec.rb index a2de9c54f..dd1da3715 100644 --- a/ceph/spec/system/ceph_profile_osd_spec.rb +++ b/ceph/spec/system/ceph_profile_osd_spec.rb @@ -61,7 +61,7 @@ class { 'ceph::repo': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -90,21 +90,21 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -114,18 +114,18 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'ceph-disk zap /dev/sdb' end it 'should uninstall one monitor' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -153,21 +153,21 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -175,18 +175,18 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'ceph-disk zap /dev/sdb' end it 'should uninstall one monitor' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -225,21 +225,21 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -247,11 +247,11 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 
1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'ceph-disk zap /dev/sdb' @@ -259,7 +259,7 @@ class { 'ceph::repo': it 'should uninstall one monitor and all packages' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -296,28 +296,28 @@ class { 'ceph::repo': end puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd on second host' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -325,23 +325,23 @@ class { 'ceph::repo': EOS puppet_apply(:node => 'second', :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell(:node => 'second', :command => 'ceph-disk zap /dev/sdb') puppet_apply(:node => 'second', :code => purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end it 'should uninstall one monitor on first host' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -388,28 +388,28 @@ class { 'ceph::repo': end puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons .* quorum 0 first/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons .* quorum 0 first/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd on second host' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -420,23 +420,23 @@ class { '::ceph::profile::client': } -> EOS puppet_apply(:node => 'second', :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell(:node => 'second', :command => 'ceph-disk zap /dev/sdb') puppet_apply(:node => 'second', :code => purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end it 'should uninstall one monitor' do puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end diff --git a/ceph/spec/system/ceph_repo_spec.rb b/ceph/spec/system/ceph_repo_spec.rb index c20dfcb03..2d43f08f9 100644 --- 
a/ceph/spec/system/ceph_repo_spec.rb +++ b/ceph/spec/system/ceph_repo_spec.rb @@ -46,7 +46,7 @@ class { 'ceph::repo': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end @@ -54,33 +54,33 @@ class { 'ceph::repo': if osfamily == 'Debian' shell 'apt-cache policy ceph' do |r| - r.stdout.should_not =~ /ceph.com/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'apt-cache policy curl' do |r| - r.stdout.should_not =~ /ceph.com/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.exit_code).to be_zero end shell 'apt-cache policy libapache2-mod-fastcgi' do |r| - r.stdout.should_not =~ /ceph.com/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.exit_code).to be_zero end end if osfamily == 'RedHat' shell 'yum info ceph' do |r| - r.stdout.should_not =~ /ceph.com/ - r.stderr.should =~ /Error: No matching Packages to list/ - r.exit_code.should_not be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.stderr).to match(/Error: No matching Packages to list/) + expect(r.exit_code).not_to be_zero end shell 'yum info qemu-kvm' do |r| - r.stdout.should_not =~ /Repo.*ext-ceph-extras/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/Repo.*ext-ceph-extras/) + expect(r.exit_code).to be_zero end shell 'yum info mod_fastcgi' do |r| - r.stdout.should_not =~ /Repo.*ext-ceph-fastcgi/ - r.stderr.should =~ /Error: No matching Packages to list/ - r.exit_code.should_not be_zero + expect(r.stdout).not_to match(/Repo.*ext-ceph-fastcgi/) + expect(r.stderr).to match(/Error: No matching Packages to list/) + expect(r.exit_code).not_to be_zero end end end @@ -116,28 +116,28 @@ class { 'ceph::repo': # Run it twice and test for idempotency puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell querycommand do |r| - r.stdout.should =~ /#{queryresult}/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/#{queryresult}/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end # Test extras is not enabled if osfamily == 'Debian' shell 'apt-cache policy curl' do |r| - r.stdout.should_not =~ /ceph\.com.*ceph-extras/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/ceph\.com.*ceph-extras/) + expect(r.exit_code).to be_zero end end if osfamily == 'RedHat' shell 'yum info qemu-kvm' do |r| - r.stdout.should_not =~ /Repo.*ext-ceph-extras/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/Repo.*ext-ceph-extras/) + expect(r.exit_code).to be_zero end end @@ -151,23 +151,23 @@ class { 'ceph::repo': # Run it twice and test for idempotency puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end if osfamily == 'Debian' shell querycommand do |r| - r.stdout.should_not =~ /ceph.com/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end if osfamily == 'RedHat' shell querycommand do |r| - r.stdout.should_not =~ /ceph.com/ - r.stderr.should =~ /Error: No matching Packages to list/ - r.exit_code.should_not be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.stderr).to 
match(/Error: No matching Packages to list/) + expect(r.exit_code).not_to be_zero end end end @@ -184,24 +184,24 @@ class { 'ceph::repo': # Run it twice and test for idempotency puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end # Test for a package in ceph-extras (curl/qemu-kvm) if osfamily == 'Debian' shell 'apt-cache policy curl' do |r| - r.stdout.should =~ /ceph\.com.*ceph-extras/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/ceph\.com.*ceph-extras/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end if osfamily == 'RedHat' shell 'yum info qemu-kvm' do |r| - r.stdout.should =~ /Repo.*ext-ceph-extras/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/Repo.*ext-ceph-extras/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -216,21 +216,21 @@ class { 'ceph::repo': # Run it twice and test for idempotency puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end if osfamily == 'Debian' shell 'apt-cache policy curl' do |r| - r.stdout.should_not =~ /ceph.com/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.exit_code).to be_zero end end if osfamily == 'RedHat' shell 'yum info qemu-kvm' do |r| - r.stdout.should_not =~ /Repo.*ext-ceph-extras/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/Repo.*ext-ceph-extras/) + expect(r.exit_code).to be_zero end end end @@ -247,24 +247,24 @@ class { 'ceph::repo': # Run it twice and test for idempotency puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end # Test fastcgi in ceph-fastcgi if osfamily == 'Debian' shell 'apt-cache policy libapache2-mod-fastcgi' do |r| - r.stdout.should =~ /ceph.com/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/ceph.com/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end if osfamily == 'RedHat' shell 'yum info mod_fastcgi' do |r| - r.stdout.should =~ /Repo.*ext-ceph-fastcgi/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/Repo.*ext-ceph-fastcgi/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -279,22 +279,22 @@ class { 'ceph::repo': # Run it twice and test for idempotency puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end if osfamily == 'Debian' shell 'apt-cache policy libapache2-mod-fastcgi' do |r| - r.stdout.should_not =~ /ceph.com/ - r.exit_code.should be_zero + expect(r.stdout).not_to match(/ceph.com/) + expect(r.exit_code).to be_zero end end if osfamily == 'RedHat' shell 'yum info mod_fastcgi' do |r| - r.stdout.should_not =~ /Repo.*ext-ceph-fastcgi/ - r.stderr.should =~ /Error: No matching Packages to list/ - r.exit_code.should_not be_zero + expect(r.stdout).not_to match(/Repo.*ext-ceph-fastcgi/) + expect(r.stderr).to match(/Error: No matching Packages to list/) + expect(r.exit_code).not_to be_zero end end end diff --git a/ceph/spec/system/ceph_rgw_apache_spec.rb b/ceph/spec/system/ceph_rgw_apache_spec.rb index dcdbad0fe..fde061786 100644 --- a/ceph/spec/system/ceph_rgw_apache_spec.rb 
+++ b/ceph/spec/system/ceph_rgw_apache_spec.rb @@ -20,7 +20,7 @@ describe 'ceph::rgw::apache' do - releases = ENV['RELEASES'] ? ENV['RELEASES'].split : [ 'dumpling', 'firefly', 'giant' ] + releases = ENV['RELEASES'] ? ENV['RELEASES'].split : [ 'dumpling', 'giant' ] fsid = 'a4807c9a-e76f-4666-a297-6d6cbc922e3a' mon_key ='AQCztJdSyNb0NBAASA2yPZPuwXeIQnDJ9O8gVw==' admin_key = 'AQA0TVRTsP/aHxAAFBvntu1dSEJHxtJeFFrRsg==' @@ -109,36 +109,36 @@ class { 'ceph': } puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell servicequery[osfamily] do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'radosgw-admin user create --uid=puppet --display-name=puppet-user' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'radosgw-admin subuser create --uid=puppet --subuser=puppet:swift --access=full' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end # need to create subuser key twice, due to http://tracker.ceph.com/issues/9155 shell "radosgw-admin key create --subuser=puppet:swift --key-type=swift --secret='123456'" do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell "radosgw-admin key create --subuser=puppet:swift --key-type=swift --secret='123456'" do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end shell 'curl -i -H "X-Auth-User: puppet:swift" -H "X-Auth-Key: 123456" http://first/auth/v1.0/' do |r| - r.exit_code.should be_zero - r.stdout.should =~ /HTTP\/1\.1 204 No Content/ - r.stdout.should_not =~ /401 Unauthorized/ + expect(r.exit_code).to be_zero + expect(r.stdout).to match(/HTTP\/1\.1 204 No Content/) + expect(r.stdout).not_to match(/401 Unauthorized/) end end @@ -151,7 +151,7 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph-disk zap /dev/sdb' @@ -196,7 +196,7 @@ class { 'apache': EOS puppet_apply(purge) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end diff --git a/ceph/spec/system/ceph_usecases_spec.rb b/ceph/spec/system/ceph_usecases_spec.rb index 511e28825..0eef0ec52 100644 --- a/ceph/spec/system/ceph_usecases_spec.rb +++ b/ceph/spec/system/ceph_usecases_spec.rb @@ -49,27 +49,27 @@ class { 'ceph': puppet_apply(pp) do |r| # due to the generate() the above is not idempotent # so we don't run twice as usual - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'sleep 30' # we need to wait a bit until the OSD is up shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -79,12 +79,12 @@ class { 'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell 'ceph osd tree | grep DNE' do |r| - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end @@ -96,7 +96,7 @@ class { 
'ceph': EOS puppet_apply(pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -104,16 +104,16 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=a' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.a/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.a/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.a' do |r| - r.stdout.should =~ /mon.a not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.a not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -130,7 +130,7 @@ class { 'ceph::repo': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -193,28 +193,28 @@ class { 'ceph': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end shell 'ceph -s' do |r| - r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall one osd' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -224,13 +224,13 @@ class { 'ceph': EOS puppet_apply(:node => 'second', :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell(:node => 'second', :command => "test -b /dev/sdb && ceph-disk zap /dev/sdb") shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end end @@ -242,7 +242,7 @@ class { 'ceph': EOS puppet_apply(:node => 'first', :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -250,16 +250,16 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.first/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.first/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should =~ /mon.first not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.first not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -276,7 +276,7 @@ class { 'ceph::repo': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end @@ -316,29 +316,29 @@ class { 'ceph': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) r.refresh - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end shell 'ceph -s' do |r| - 
r.stdout.should =~ /1 mons at/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/1 mons at/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end shell 'ceph osd tree' do |r| - r.stdout.should =~ /osd.0/ - r.stdout.should =~ /osd.1/ - r.stderr.should be_empty - r.exit_code.should be_zero + expect(r.stdout).to match(/osd.0/) + expect(r.stdout).to match(/osd.1/) + expect(r.stderr).to be_empty + expect(r.exit_code).to be_zero end end it 'should uninstall two OSDs' do shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should_not be_zero + expect(r.exit_code).not_to be_zero end pp = <<-EOS @@ -349,14 +349,14 @@ class { 'ceph': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end shell(:node => vm, :command => "test -b /dev/sdb && ceph-disk zap /dev/sdb") end shell 'ceph osd tree | grep DNE' do |r| - r.exit_code.should be_zero + expect(r.exit_code).to be_zero end end @@ -368,7 +368,7 @@ class { 'ceph': EOS puppet_apply(:node => 'first', :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end osfamily = facter.facts['osfamily'] @@ -376,16 +376,16 @@ class { 'ceph': if osfamily == 'Debian' && operatingsystem == 'Ubuntu' shell 'status ceph-mon id=first' do |r| - r.stdout.should be_empty - r.stderr.should =~ /Unknown instance: ceph.first/ - r.exit_code.should_not be_zero + expect(r.stdout).to be_empty + expect(r.stderr).to match(/Unknown instance: ceph.first/) + expect(r.exit_code).not_to be_zero end end if osfamily == 'RedHat' shell 'service ceph status mon.first' do |r| - r.stdout.should =~ /mon.first not found/ - r.stderr.should be_empty - r.exit_code.should_not be_zero + expect(r.stdout).to match(/mon.first not found/) + expect(r.stderr).to be_empty + expect(r.exit_code).not_to be_zero end end end @@ -402,7 +402,7 @@ class { 'ceph::repo': machines.each do |vm| puppet_apply(:node => vm, :code => pp) do |r| - r.exit_code.should_not == 1 + expect(r.exit_code).not_to eq(1) end end end diff --git a/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb b/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb index f034d8857..8a944c691 100644 --- a/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb +++ b/ceph/spec/unit/provider/ceph_config/ini_setting_spec.rb @@ -43,16 +43,16 @@ } } def validate(expected, tmpfile = tmpfile) - File.read(tmpfile).should == expected + expect(File.read(tmpfile)).to eq(expected) end it 'should create keys = value and ensure space around equals' do resource = Puppet::Type::Ceph_config.new(params.merge( :name => 'global/ceph_is_foo', :value => 'bar')) provider = provider_class.new(resource) - provider.exists?.should be_falsey + expect(provider.exists?).to be_falsey provider.create - provider.exists?.should be_truthy + expect(provider.exists?).to be_truthy validate(<<-EOS [global] @@ -65,7 +65,7 @@ def validate(expected, tmpfile = tmpfile) resource = Puppet::Type::Ceph_config.new( :name => 'global/ceph_is_foo', :value => 'bar') provider = provider_class.new(resource) - provider.file_path.should == '/etc/ceph/ceph.conf' + expect(provider.file_path).to eq('/etc/ceph/ceph.conf') end end diff --git a/ceph/spec/unit/type/ceph_config_spec.rb b/ceph/spec/unit/type/ceph_config_spec.rb index 01e4af85b..c78dfb684 100644 --- a/ceph/spec/unit/type/ceph_config_spec.rb +++ b/ceph/spec/unit/type/ceph_config_spec.rb @@ -26,16 +26,16 @@ it 'should work bascily' do @ceph_config[:value] = 'max' - 
@ceph_config[:value].should == 'max' + expect(@ceph_config[:value]).to eq('max') end it 'should convert true to True' do @ceph_config[:value] = 'tRuE' - @ceph_config[:value].should == 'True' + expect(@ceph_config[:value]).to eq('True') end it 'should convert false to False' do @ceph_config[:value] = 'fAlSe' - @ceph_config[:value].should == 'False' + expect(@ceph_config[:value]).to eq('False') end end \ No newline at end of file diff --git a/cinder/CHANGELOG.md b/cinder/CHANGELOG.md index d087182b2..34e0ab23d 100644 --- a/cinder/CHANGELOG.md +++ b/cinder/CHANGELOG.md @@ -1,3 +1,52 @@ +##2015-07-08 - 6.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Kilo. + +####Backwards-incompatible changes +- Iscsi: Change default $volume_driver +- Switch to TLSv1 as SSLv3 is considered insecure and is disabled by default +- Remove POSIX users, groups, and file modes +- Move rabbit/kombu settings to oslo_messaging_rabbit section +- Also removed deprecated parameters + +####Features +- Puppet 4.x support +- Refactorise Keystone resources management +- Add an option to not configure RabbitMQ service +- Run db_sync when upgrading packages +- Makes kombu_ssl_* parameters optional when rabbit_use_ssl => true +- Adds ability to override service name for service catalog +- Support the enable_v*_api settings +- Support iSER driver within the ISCSITarget flow +- ISCSI: Allow one to specify volumes_dir path +- Backends: Add an extra_options door +- Support identity_uri and auth_uri properly +- Make scheduler_driver option can be cleaned up +- Tag all Cinder packages +- Adds OracleLinux support +- Create a sync_db boolean for Cinder +- Update NetApp params for Kilo +- Add nfs_mount_options variable when backend is NetApp +- Add support for NFS Backup +- Decouple $sync_db from $enabled +- Add backup compression parameter +- Introduce public_url, internal_url and admin_url +- Added support for DellStorageCenter ISCSI cinder driver +- Add cinder::scheduler::filter for managing scheduler.filter +- NetApp: use $name for configuration group name (allows to run multiple NetApp + backends) +- Lint documentation parameters +- HP 3par iscsi backend module +- MySQL: change default MySQL collate to utf8_general_ci + +####Bugfixes +- Fix db_sync dependencies + +####Maintenance +- Acceptance tests with Beaker +- Fix spec tests for RSpec 3.x and Puppet 4.x + ##2015-06-17 - 5.1.0 ###Summary diff --git a/cinder/README.md b/cinder/README.md index 538178c12..937488a3a 100644 --- a/cinder/README.md +++ b/cinder/README.md @@ -1,7 +1,7 @@ cinder ======= -5.1.0 - 2014.2 - Juno +6.0.0 - 2015.1 - Kilo #### Table of Contents diff --git a/cinder/manifests/api.pp b/cinder/manifests/api.pp index 828902ed2..ec861c9ea 100644 --- a/cinder/manifests/api.pp +++ b/cinder/manifests/api.pp @@ -40,6 +40,15 @@ # requests. For example, boot-from-volume. # Defaults to undef. # +# [*nova_catalog_info*] +# (optional) Match this value when searching for nova in the service +# catalog. +# Defaults to 'compute:Compute Service:publicURL' +# +# [*nova_catalog_admin_info*] +# (optional) Same as nova_catalog_info, but for admin endpoint. +# Defaults to 'compute:Compute Service:adminURL' +# # [*keystone_auth_admin_prefix*] # (optional) DEPRECATED The admin_prefix used to admin endpoint of the auth # host. This allow admin auth URIs like http://auth_host:35357/keystone. 
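The cinder::api hunks that follow wire the two new catalog-lookup parameters documented above into cinder_config. As an illustration only (not part of the patch), overriding them might look like the sketch below; the keystone_password value is a placeholder assumed to be the class's required credential, while the catalog strings reuse the custom values exercised by the spec test added further down in this diff:

```puppet
# Usage sketch (assumption, not part of this patch): point Cinder at a
# differently named Nova entry in the Keystone service catalog.
class { '::cinder::api':
  keystone_password       => 'placeholder',            # assumed required credential
  nova_catalog_info       => 'compute:nova:publicURL', # values also used in the new spec test
  nova_catalog_admin_info => 'compute:nova:adminURL',
}
```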
@@ -131,6 +140,8 @@ $auth_uri = false, $identity_uri = false, $os_region_name = undef, + $nova_catalog_info = 'compute:Compute Service:publicURL', + $nova_catalog_admin_info = 'compute:Compute Service:adminURL', $service_workers = $::processorcount, $package_ensure = 'present', $bind_host = '0.0.0.0', @@ -215,6 +226,11 @@ } } + cinder_config { + 'DEFAULT/nova_catalog_info': value => $nova_catalog_info; + 'DEFAULT/nova_catalog_admin_info': value => $nova_catalog_admin_info; + } + if $keystone_auth_uri and $auth_uri { fail('both keystone_auth_uri and auth_uri are set and they have the same meaning') } diff --git a/cinder/manifests/ceilometer.pp b/cinder/manifests/ceilometer.pp index 813ea687d..c13374b35 100644 --- a/cinder/manifests/ceilometer.pp +++ b/cinder/manifests/ceilometer.pp @@ -7,16 +7,17 @@ # # [*notification_driver*] # (option) Driver or drivers to handle sending notifications. -# Notice: rabbit_notifier has been deprecated in Grizzly, use rpc_notifier instead. +# The default value of 'messagingv2' is for enabling notifications via +# oslo.messaging. 'cinder.openstack.common.notifier.rpc_notifier' is the +# backwards compatible option that will be deprecated. Prior to Grizzly, +# 'cinder.openstack.common.notifier.rabbit_notifier' was used. oslo.messaging +# was adopted in icehouse/juno. See LP#1425713. # - - class cinder::ceilometer ( - $notification_driver = 'cinder.openstack.common.notifier.rpc_notifier' + $notification_driver = 'messagingv2', ) { cinder_config { - 'DEFAULT/notification_driver': value => $notification_driver; + 'DEFAULT/notification_driver': value => $notification_driver; } } - diff --git a/cinder/manifests/cron/db_purge.pp b/cinder/manifests/cron/db_purge.pp new file mode 100644 index 000000000..559f7848e --- /dev/null +++ b/cinder/manifests/cron/db_purge.pp @@ -0,0 +1,76 @@ +# +# Copyright (C) 2015 Red Hat Inc. +# +# Author: Martin Magr +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# == Class: cinder::cron::db_purge +# +# Move deleted instances to another table that you don't have to backup +# unless you have data retention policies. +# +# === Parameters +# +# [*minute*] +# (optional) Defaults to '1'. +# +# [*hour*] +# (optional) Defaults to '0'. +# +# [*monthday*] +# (optional) Defaults to '*'. +# +# [*month*] +# (optional) Defaults to '*'. +# +# [*weekday*] +# (optional) Defaults to '*'. +# +# [*user*] +# (optional) User with access to cinder files. +# Defaults to 'cinder'. +# +# [*age*] +# (optional) Number of days prior to today for deletion, +# e.g. value 60 means to purge deleted rows that have the "deleted_at" +# column greater than 60 days ago. +# Defaults to 30 +# +# [*destination*] +# (optional) Path to file to which rows should be archived +# Defaults to '/var/log/cinder/cinder-rowsflush.log'. 
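As a hedged illustration (not part of the patch), the new cinder::cron::db_purge class defined just below could be declared as in this sketch; the 60-day age is an arbitrary example, and with the class defaults the resulting crontab entry is the one the acceptance test added later in this patch checks for:

```puppet
# Usage sketch (assumption): purge rows soft-deleted more than 60 days ago.
class { '::cinder::cron::db_purge':
  age         => 60,
  destination => '/var/log/cinder/cinder-rowsflush.log',
}
# With the defaults (age => 30), the cron entry produced is:
# 1 0 * * * cinder-manage db purge 30 >>/var/log/cinder/cinder-rowsflush.log 2>&1
```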
+# +class cinder::cron::db_purge ( + $minute = 1, + $hour = 0, + $monthday = '*', + $month = '*', + $weekday = '*', + $user = 'cinder', + $age = 30, + $destination = '/var/log/cinder/cinder-rowsflush.log' +) { + + cron { 'cinder-manage db purge': + command => "cinder-manage db purge ${age} >>${destination} 2>&1", + environment => 'PATH=/bin:/usr/bin:/usr/sbin SHELL=/bin/sh', + user => $user, + minute => $minute, + hour => $hour, + monthday => $monthday, + month => $month, + weekday => $weekday, + require => Package['cinder'], + } +} diff --git a/cinder/manifests/init.pp b/cinder/manifests/init.pp index 7e11e3394..c7e9d833c 100644 --- a/cinder/manifests/init.pp +++ b/cinder/manifests/init.pp @@ -84,6 +84,21 @@ # (Optional) Virtual_host to use. # Defaults to '/' # +# [*rabbit_heartbeat_timeout_threshold*] +# (optional) Number of seconds after which the RabbitMQ broker is considered +# down if the heartbeat keepalive fails. Any value >0 enables heartbeats. +# Heartbeating helps to ensure the TCP connection to RabbitMQ isn't silently +# closed, resulting in missed or lost messages from the queue. +# (Requires kombu >= 3.0.7 and amqp >= 1.4.0) +# Defaults to 0 +# +# [*rabbit_heartbeat_rate*] +# (optional) How often during the rabbit_heartbeat_timeout_threshold period to +# check the heartbeat on RabbitMQ connection. (i.e. rabbit_heartbeat_rate=2 +# when rabbit_heartbeat_timeout_threshold=60, the heartbeat will be checked +# every 30 seconds. +# Defaults to 2 +# # [*rabbit_use_ssl*] # (optional) Connect over SSL for RabbitMQ # Defaults to false @@ -212,58 +227,60 @@ # DEPRECATED. Does nothing. # class cinder ( - $database_connection = 'sqlite:////var/lib/cinder/cinder.sqlite', - $database_idle_timeout = '3600', - $database_min_pool_size = '1', - $database_max_pool_size = undef, - $database_max_retries = '10', - $database_retry_interval = '10', - $database_max_overflow = undef, - $rpc_backend = 'cinder.openstack.common.rpc.impl_kombu', - $control_exchange = 'openstack', - $rabbit_host = '127.0.0.1', - $rabbit_port = 5672, - $rabbit_hosts = false, - $rabbit_virtual_host = '/', - $rabbit_userid = 'guest', - $rabbit_password = false, - $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_version = 'TLSv1', - $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = '5672', - $qpid_username = 'guest', - $qpid_password = false, - $qpid_sasl_mechanisms = false, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $package_ensure = 'present', - $use_ssl = false, - $ca_file = false, - $cert_file = false, - $key_file = false, - $api_paste_config = '/etc/cinder/api-paste.ini', - $use_syslog = false, - $log_facility = 'LOG_USER', - $log_dir = '/var/log/cinder', - $verbose = false, - $debug = false, - $storage_availability_zone = 'nova', - $default_availability_zone = false, - $enable_v1_api = true, - $enable_v2_api = true, + $database_connection = 'sqlite:////var/lib/cinder/cinder.sqlite', + $database_idle_timeout = '3600', + $database_min_pool_size = '1', + $database_max_pool_size = undef, + $database_max_retries = '10', + $database_retry_interval = '10', + $database_max_overflow = undef, + $rpc_backend = 'cinder.openstack.common.rpc.impl_kombu', + $control_exchange = 'openstack', + $rabbit_host = 
'127.0.0.1', + $rabbit_port = 5672, + $rabbit_hosts = false, + $rabbit_virtual_host = '/', + $rabbit_heartbeat_timeout_threshold = 0, + $rabbit_heartbeat_rate = 2, + $rabbit_userid = 'guest', + $rabbit_password = false, + $rabbit_use_ssl = false, + $kombu_ssl_ca_certs = undef, + $kombu_ssl_certfile = undef, + $kombu_ssl_keyfile = undef, + $kombu_ssl_version = 'TLSv1', + $amqp_durable_queues = false, + $qpid_hostname = 'localhost', + $qpid_port = '5672', + $qpid_username = 'guest', + $qpid_password = false, + $qpid_sasl_mechanisms = false, + $qpid_reconnect = true, + $qpid_reconnect_timeout = 0, + $qpid_reconnect_limit = 0, + $qpid_reconnect_interval_min = 0, + $qpid_reconnect_interval_max = 0, + $qpid_reconnect_interval = 0, + $qpid_heartbeat = 60, + $qpid_protocol = 'tcp', + $qpid_tcp_nodelay = true, + $package_ensure = 'present', + $use_ssl = false, + $ca_file = false, + $cert_file = false, + $key_file = false, + $api_paste_config = '/etc/cinder/api-paste.ini', + $use_syslog = false, + $log_facility = 'LOG_USER', + $log_dir = '/var/log/cinder', + $verbose = false, + $debug = false, + $storage_availability_zone = 'nova', + $default_availability_zone = false, + $enable_v1_api = true, + $enable_v2_api = true, # DEPRECATED PARAMETERS - $mysql_module = undef, + $mysql_module = undef, ) { include ::cinder::params @@ -302,12 +319,14 @@ } cinder_config { - 'oslo_messaging_rabbit/rabbit_password': value => $rabbit_password, secret => true; - 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; - 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; - 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; - 'DEFAULT/control_exchange': value => $control_exchange; - 'DEFAULT/amqp_durable_queues': value => $amqp_durable_queues; + 'oslo_messaging_rabbit/rabbit_password': value => $rabbit_password, secret => true; + 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; + 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; + 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; + 'oslo_messaging_rabbit/heartbeat_timeout_threshold': value => $rabbit_heartbeat_timeout_threshold; + 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; + 'DEFAULT/control_exchange': value => $control_exchange; + 'DEFAULT/amqp_durable_queues': value => $amqp_durable_queues; } if $rabbit_hosts { diff --git a/cinder/metadata.json b/cinder/metadata.json index 3b718d866..438e83f27 100644 --- a/cinder/metadata.json +++ b/cinder/metadata.json @@ -1,6 +1,6 @@ { - "name": "stackforge-cinder", - "version": "5.1.0", + "name": "openstack-cinder", + "version": "6.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Cinder", "license": "Apache-2.0", @@ -33,9 +33,9 @@ "dependencies": [ { "name": "dprince/qpid", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, { "name": "puppetlabs/rabbitmq", "version_requirement": ">=2.0.2 <6.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0 <6.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } ] } diff --git a/cinder/spec/acceptance/basic_cinder_spec.rb b/cinder/spec/acceptance/basic_cinder_spec.rb 
index f12c9e03d..9b5595b79 100644 --- a/cinder/spec/acceptance/basic_cinder_spec.rb +++ b/cinder/spec/acceptance/basic_cinder_spec.rb @@ -102,6 +102,7 @@ class { '::cinder::quota': } class { '::cinder::scheduler': } class { '::cinder::scheduler::filter': } class { '::cinder::volume': } + class { '::cinder::cron::db_purge': } # TODO: create a backend and spawn a volume EOS @@ -115,5 +116,10 @@ class { '::cinder::volume': } it { is_expected.to be_listening.with('tcp') } end + describe cron do + it { is_expected.to have_entry('1 0 * * * cinder-manage db purge 30 >>/var/log/cinder/cinder-rowsflush.log 2>&1').with_user('cinder') } + end + + end end diff --git a/cinder/spec/classes/cinder_api_spec.rb b/cinder/spec/classes/cinder_api_spec.rb index e901ec9a0..7be1b6552 100644 --- a/cinder/spec/classes/cinder_api_spec.rb +++ b/cinder/spec/classes/cinder_api_spec.rb @@ -30,6 +30,12 @@ is_expected.to contain_cinder_config('DEFAULT/osapi_volume_workers').with( :value => '8' ) + is_expected.to contain_cinder_config('DEFAULT/nova_catalog_info').with( + :value => 'compute:Compute Service:publicURL' + ) + is_expected.to contain_cinder_config('DEFAULT/nova_catalog_admin_info').with( + :value => 'compute:Compute Service:adminURL' + ) is_expected.to contain_cinder_config('DEFAULT/default_volume_type').with( :ensure => 'absent' ) @@ -73,6 +79,17 @@ end end + describe 'with a custom nova_catalog params' do + let :params do + req_params.merge({ + 'nova_catalog_admin_info' => 'compute:nova:adminURL', + 'nova_catalog_info' => 'compute:nova:publicURL', + }) + end + it { is_expected.to contain_cinder_config('DEFAULT/nova_catalog_admin_info').with_value('compute:nova:adminURL') } + it { is_expected.to contain_cinder_config('DEFAULT/nova_catalog_info').with_value('compute:nova:publicURL') } + end + describe 'with a custom region for nova' do let :params do req_params.merge({'os_region_name' => 'MyRegion'}) diff --git a/cinder/spec/classes/cinder_ceilometer_spec.rb b/cinder/spec/classes/cinder_ceilometer_spec.rb index fdb9b49ee..1d268c1a7 100644 --- a/cinder/spec/classes/cinder_ceilometer_spec.rb +++ b/cinder/spec/classes/cinder_ceilometer_spec.rb @@ -5,7 +5,7 @@ describe 'with default parameters' do it 'contains default values' do is_expected.to contain_cinder_config('DEFAULT/notification_driver').with( - :value => 'cinder.openstack.common.notifier.rpc_notifier') + :value => 'messagingv2') end end end diff --git a/cinder/spec/classes/cinder_cron_db_purge_spec.rb b/cinder/spec/classes/cinder_cron_db_purge_spec.rb new file mode 100644 index 000000000..f78e9a7bc --- /dev/null +++ b/cinder/spec/classes/cinder_cron_db_purge_spec.rb @@ -0,0 +1,33 @@ +require 'spec_helper' + +describe 'cinder::cron::db_purge' do + + let :facts do + { :osfamily => 'RedHat' } + end + + let :params do + { :minute => 1, + :hour => 0, + :monthday => '*', + :month => '*', + :weekday => '*', + :user => 'cinder', + :age => '30', + :destination => '/var/log/cinder/cinder-rowsflush.log' } + end + + it 'configures a cron' do + is_expected.to contain_cron('cinder-manage db purge').with( + :command => "cinder-manage db purge #{params[:age]} >>#{params[:destination]} 2>&1", + :environment => 'PATH=/bin:/usr/bin:/usr/sbin SHELL=/bin/sh', + :user => params[:user], + :minute => params[:minute], + :hour => params[:hour], + :monthday => params[:monthday], + :month => params[:month], + :weekday => params[:weekday], + :require => 'Package[cinder]', + ) + end +end diff --git a/cinder/spec/classes/cinder_spec.rb b/cinder/spec/classes/cinder_spec.rb index 
8bfc280f9..3f5d62f47 100644 --- a/cinder/spec/classes/cinder_spec.rb +++ b/cinder/spec/classes/cinder_spec.rb @@ -25,6 +25,8 @@ is_expected.to contain_cinder_config('oslo_messaging_rabbit/rabbit_hosts').with(:value => '127.0.0.1:5672') is_expected.to contain_cinder_config('oslo_messaging_rabbit/rabbit_ha_queues').with(:value => false) is_expected.to contain_cinder_config('oslo_messaging_rabbit/rabbit_virtual_host').with(:value => '/') + is_expected.to contain_cinder_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') + is_expected.to contain_cinder_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') is_expected.to contain_cinder_config('oslo_messaging_rabbit/rabbit_userid').with(:value => 'guest') is_expected.to contain_cinder_config('database/connection').with(:value => 'mysql://user:password@host/database', :secret => true) is_expected.to contain_cinder_config('database/idle_timeout').with(:value => '3600') @@ -68,6 +70,17 @@ end end + describe 'with rabbitmq heartbeats' do + let :params do + req_params.merge({'rabbit_heartbeat_timeout_threshold' => '60', 'rabbit_heartbeat_rate' => '10'}) + end + + it 'should contain heartbeat config' do + is_expected.to contain_cinder_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('60') + is_expected.to contain_cinder_config('oslo_messaging_rabbit/heartbeat_rate').with_value('10') + end + end + describe 'with qpid rpc supplied' do let :params do diff --git a/cinder/spec/spec_helper_acceptance.rb b/cinder/spec/spec_helper_acceptance.rb index 429e807c4..144b31e3f 100644 --- a/cinder/spec/spec_helper_acceptance.rb +++ b/cinder/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/galera/manifests/server.pp b/galera/manifests/server.pp index f4f630efe..5496ea2a2 100644 --- a/galera/manifests/server.pp +++ b/galera/manifests/server.pp @@ -121,11 +121,17 @@ warning("DEPRECATED: wsrep_bind_address is deprecated, you should use bind_address of mysql module") } - $wsrep_provider_options = wsrep_options({ - 'socket.ssl' => $wsrep_ssl, - 'socket.ssl_key' => $wsrep_ssl_key, - 'socket.ssl_cert' => $wsrep_ssl_cert, - }) + if $wsrep_ssl { + $wsrep_provider_options = wsrep_options({ + 'socket.ssl' => $wsrep_ssl, + 'socket.ssl_key' => $wsrep_ssl_key, + 'socket.ssl_cert' => $wsrep_ssl_cert, + }) + } else { + $wsrep_provider_options = wsrep_options({ + 'socket.ssl' => $wsrep_ssl, + }) + } $wsrep_debug = bool2num($debug) @@ -136,6 +142,7 @@ group => 'root', content => template('galera/wsrep.cnf.erb'), notify => Service['mysqld'], + require => Class['mysql::server::install'], } if $manage_service { diff --git a/glance/CHANGELOG.md b/glance/CHANGELOG.md index 1e69f7f3f..c3a1b33fc 100644 --- a/glance/CHANGELOG.md +++ b/glance/CHANGELOG.md @@ -1,3 +1,31 @@ +##2015-07-08 - 6.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Kilo. 
+ +####Backwards-incompatible changes +- Move rabbit/kombu settings to oslo_messaging_rabbit section +- Remove sql_connection and sql_idle_timeout deprecated parameters +- api: change default pipeline +- Separate api and registry packages for Red Hat +- python-ceph no longer exists in el7, use python-rbd + +####Features +- Puppet 4.x support +- Refactorise Keystone resources management +- Migrate postgresql backend to use openstacklib::db::postgresql +- Add support for identity_uri +- Service Validation for Glance-API +- Create a sync_db boolean for Glance +- make service description configurable + +####Bugfixes +- Fix API/Registry ensure for Ubuntu + +####Maintenance +- Acceptance tests with Beaker +- Fix spec tests for RSpec 3.x and Puppet 4.x + ##2015-06-17 - 5.1.0 ###Summary diff --git a/glance/README.md b/glance/README.md index a980477ae..7d471435c 100644 --- a/glance/README.md +++ b/glance/README.md @@ -1,7 +1,7 @@ glance ======= -5.1.0 - 2014.2 - Juno +6.0.0 - 2015.1 - Kilo #### Table of Contents diff --git a/glance/manifests/backend/swift.pp b/glance/manifests/backend/swift.pp index 6065acff0..ea3a34b57 100644 --- a/glance/manifests/backend/swift.pp +++ b/glance/manifests/backend/swift.pp @@ -29,6 +29,9 @@ # == class: glance::backend::swift # [*swift_store_endpoint_type*] # Optional. Default: 'internalURL' # +# [*swift_store_region*] +# Optional. Default: undef +# class glance::backend::swift( $swift_store_user, $swift_store_key, @@ -37,7 +40,8 @@ # == class: glance::backend::swift $swift_store_auth_version = '2', $swift_store_large_object_size = '5120', $swift_store_create_container_on_put = false, - $swift_store_endpoint_type = 'internalURL' + $swift_store_endpoint_type = 'internalURL', + $swift_store_region = undef, ) { glance_api_config { @@ -45,6 +49,7 @@ # == class: glance::backend::swift 'glance_store/swift_store_user': value => $swift_store_user; 'glance_store/swift_store_key': value => $swift_store_key; 'glance_store/swift_store_auth_address': value => $swift_store_auth_address; + 'glance_store/swift_store_region': value => $swift_store_region; 'glance_store/swift_store_container': value => $swift_store_container; 'glance_store/swift_store_auth_version': value => $swift_store_auth_version; 'glance_store/swift_store_create_container_on_put': @@ -59,6 +64,7 @@ # == class: glance::backend::swift 'glance_store/swift_store_user': value => $swift_store_user; 'glance_store/swift_store_key': value => $swift_store_key; 'glance_store/swift_store_auth_address': value => $swift_store_auth_address; + 'glance_store/swift_store_region': value => $swift_store_region; 'glance_store/swift_store_container': value => $swift_store_container; 'glance_store/swift_store_auth_version': value => $swift_store_auth_version; 'glance_store/swift_store_create_container_on_put': diff --git a/glance/manifests/notify/rabbitmq.pp b/glance/manifests/notify/rabbitmq.pp index a55434926..87110e837 100644 --- a/glance/manifests/notify/rabbitmq.pp +++ b/glance/manifests/notify/rabbitmq.pp @@ -21,6 +21,21 @@ # [*rabbit_virtual_host*] # virtual_host to use. Optional. Defaults to '/' # +# [*rabbit_heartbeat_timeout_threshold*] +# (optional) Number of seconds after which the RabbitMQ broker is considered +# down if the heartbeat keepalive fails. Any value >0 enables heartbeats. +# Heartbeating helps to ensure the TCP connection to RabbitMQ isn't silently +# closed, resulting in missed or lost messages from the queue. 
+# (Requires kombu >= 3.0.7 and amqp >= 1.4.0) +# Defaults to 0 +# +# [*rabbit_heartbeat_rate*] +# (optional) How often during the rabbit_heartbeat_timeout_threshold period to +# check the heartbeat on RabbitMQ connection. (i.e. rabbit_heartbeat_rate=2 +# when rabbit_heartbeat_timeout_threshold=60, the heartbeat will be checked +# every 30 seconds. +# Defaults to 2 +# # [*rabbit_use_ssl*] # (optional) Connect over SSL for RabbitMQ # Defaults to false @@ -61,21 +76,23 @@ class glance::notify::rabbitmq( $rabbit_password, - $rabbit_userid = 'guest', - $rabbit_host = 'localhost', - $rabbit_port = '5672', - $rabbit_hosts = false, - $rabbit_virtual_host = '/', - $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_version = 'TLSv1', - $rabbit_notification_exchange = 'glance', - $rabbit_notification_topic = 'notifications', - $rabbit_durable_queues = false, - $amqp_durable_queues = false, - $notification_driver = 'messaging', + $rabbit_userid = 'guest', + $rabbit_host = 'localhost', + $rabbit_port = '5672', + $rabbit_hosts = false, + $rabbit_virtual_host = '/', + $rabbit_heartbeat_timeout_threshold = 0, + $rabbit_heartbeat_rate = 2, + $rabbit_use_ssl = false, + $kombu_ssl_ca_certs = undef, + $kombu_ssl_certfile = undef, + $kombu_ssl_keyfile = undef, + $kombu_ssl_version = 'TLSv1', + $rabbit_notification_exchange = 'glance', + $rabbit_notification_topic = 'notifications', + $rabbit_durable_queues = false, + $amqp_durable_queues = false, + $notification_driver = 'messaging', ) { if $rabbit_durable_queues { @@ -106,6 +123,8 @@ 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; 'oslo_messaging_rabbit/rabbit_notification_exchange': value => $rabbit_notification_exchange; 'oslo_messaging_rabbit/rabbit_notification_topic': value => $rabbit_notification_topic; + 'oslo_messaging_rabbit/heartbeat_timeout_threshold': value => $rabbit_heartbeat_timeout_threshold; + 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; 'DEFAULT/amqp_durable_queues': value => $amqp_durable_queues_real; } diff --git a/glance/metadata.json b/glance/metadata.json index 33e155644..c9c386242 100644 --- a/glance/metadata.json +++ b/glance/metadata.json @@ -1,6 +1,6 @@ { - "name": "stackforge-glance", - "version": "5.1.0", + "name": "openstack-glance", + "version": "6.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Glance", "license": "Apache-2.0", @@ -32,8 +32,8 @@ "description": "Installs and configures OpenStack Glance (Image Service).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0 <6.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } ] } diff --git a/glance/spec/classes/glance_backend_swift_spec.rb b/glance/spec/classes/glance_backend_swift_spec.rb index 65d09ddd7..0993b5a4e 100644 --- a/glance/spec/classes/glance_backend_swift_spec.rb +++ b/glance/spec/classes/glance_backend_swift_spec.rb @@ -30,6 +30,7 @@ is_expected.to contain_glance_api_config('glance_store/swift_store_container').with_value('glance') is_expected.to 
contain_glance_api_config('glance_store/swift_store_create_container_on_put').with_value(false) is_expected.to contain_glance_api_config('glance_store/swift_store_endpoint_type').with_value('internalURL') + is_expected.to contain_glance_api_config('glance_store/swift_store_region').with_value(nil) end it 'configures glance-cache.conf' do @@ -40,6 +41,7 @@ is_expected.to contain_glance_cache_config('glance_store/swift_store_auth_address').with_value('127.0.0.1:5000/v2.0/') is_expected.to contain_glance_cache_config('glance_store/swift_store_container').with_value('glance') is_expected.to contain_glance_cache_config('glance_store/swift_store_create_container_on_put').with_value(false) + is_expected.to contain_glance_cache_config('glance_store/swift_store_region').with_value(nil) end end @@ -53,7 +55,8 @@ :swift_store_auth_address => '127.0.0.2:8080/v1.0/', :swift_store_container => 'swift', :swift_store_create_container_on_put => true, - :swift_store_endpoint_type => 'publicURL' + :swift_store_endpoint_type => 'publicURL', + :swift_store_region => 'RegionTwo' } end @@ -64,6 +67,7 @@ is_expected.to contain_glance_api_config('glance_store/swift_store_large_object_size').with_value('100') is_expected.to contain_glance_api_config('glance_store/swift_store_auth_address').with_value('127.0.0.2:8080/v1.0/') is_expected.to contain_glance_api_config('glance_store/swift_store_endpoint_type').with_value('publicURL') + is_expected.to contain_glance_api_config('glance_store/swift_store_region').with_value('RegionTwo') end it 'configures glance-cache.conf' do @@ -72,6 +76,7 @@ is_expected.to contain_glance_cache_config('glance_store/swift_store_auth_version').with_value('1') is_expected.to contain_glance_cache_config('glance_store/swift_store_large_object_size').with_value('100') is_expected.to contain_glance_cache_config('glance_store/swift_store_auth_address').with_value('127.0.0.2:8080/v1.0/') + is_expected.to contain_glance_cache_config('glance_store/swift_store_region').with_value('RegionTwo') end end end diff --git a/glance/spec/classes/glance_notify_rabbitmq_spec.rb b/glance/spec/classes/glance_notify_rabbitmq_spec.rb index 7f5f76fa0..ad84e284c 100644 --- a/glance/spec/classes/glance_notify_rabbitmq_spec.rb +++ b/glance/spec/classes/glance_notify_rabbitmq_spec.rb @@ -26,6 +26,8 @@ it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value('/') } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/rabbit_notification_exchange').with_value('glance') } it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/rabbit_notification_topic').with_value('notifications') } + it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') } + it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') } end describe 'when passing params and use ssl' do @@ -122,6 +124,18 @@ it { is_expected.to_not contain_glance_api_config('oslo_messaging_rabbit/rabbit_host') } end + describe 'when passing params for rabbitmq heartbeat' do + let :params do + { + :rabbit_password => 'pass', + :rabbit_heartbeat_timeout_threshold => '60', + :rabbit_heartbeat_rate => '10', + } + end + it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('60') } + it { is_expected.to contain_glance_api_config('oslo_messaging_rabbit/heartbeat_rate').with_value('10') } + end + describe 'when using deprecated params' do let :params do 
{ diff --git a/glance/spec/spec_helper_acceptance.rb b/glance/spec/spec_helper_acceptance.rb index e29c01cd3..b25e26b7e 100644 --- a/glance/spec/spec_helper_acceptance.rb +++ b/glance/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/gnocchi/.fixtures.yml b/gnocchi/.fixtures.yml deleted file mode 100644 index 2561c2fce..000000000 --- a/gnocchi/.fixtures.yml +++ /dev/null @@ -1,13 +0,0 @@ -fixtures: - repositories: - 'inifile': 'git://github.com/puppetlabs/puppetlabs-inifile' - 'concat': - 'repo': 'git://github.com/puppetlabs/puppetlabs-concat.git' - 'ref': '1.2.1' - 'keystone': 'git://github.com/stackforge/puppet-keystone.git' - 'mysql': 'git://github.com/puppetlabs/puppetlabs-mysql.git' - 'openstacklib': 'git://github.com/stackforge/puppet-openstacklib.git' - 'postgresql': 'git://github.com/puppetlabs/puppet-postgresql.git' - 'stdlib': 'git://github.com/puppetlabs/puppetlabs-stdlib.git' - symlinks: - 'gnocchi': "#{source_dir}" diff --git a/gnocchi/.gitignore b/gnocchi/.gitignore index da4238187..15c55efd0 100644 --- a/gnocchi/.gitignore +++ b/gnocchi/.gitignore @@ -1,7 +1,11 @@ -*.swp -spec/fixtures/modules/* -spec/fixtures/manifests/site.pp +pkg/ Gemfile.lock -.vendor -.bundle/ vendor/ +spec/fixtures/ +.vagrant/ +.bundle/ +coverage/ +.idea/ +*.swp +*.iml +openstack/ diff --git a/gnocchi/Gemfile b/gnocchi/Gemfile index 9ea211333..fc2214398 100644 --- a/gnocchi/Gemfile +++ b/gnocchi/Gemfile @@ -1,25 +1,31 @@ -source 'https://rubygems.org' +source ENV['GEM_SOURCE'] || "https://rubygems.org" group :development, :test do - gem 'puppetlabs_spec_helper', :require => false - gem 'rspec-puppet', '~> 2.1.0', :require => false - - gem 'metadata-json-lint' - gem 'puppet-lint-param-docs' - gem 'puppet-lint-absolute_classname-check' - gem 'puppet-lint-absolute_template_path' - gem 'puppet-lint-trailing_newline-check' + gem 'puppetlabs_spec_helper', :require => 'false' + gem 'rspec-puppet', '~> 2.2.0', :require => 'false' + gem 'metadata-json-lint', :require => 'false' + gem 'puppet-lint-param-docs', :require => 'false' + gem 'puppet-lint-absolute_classname-check', :require => 'false' + gem 'puppet-lint-absolute_template_path', :require => 'false' + gem 'puppet-lint-trailing_newline-check', :require => 'false' + gem 'puppet-lint-unquoted_string-check', :require => 'false' + gem 'puppet-lint-leading_zero-check', :require => 'false' + gem 'puppet-lint-variable_contains_upcase', :require => 'false' + gem 'puppet-lint-numericvariable', :require => 'false' + gem 'json', :require => 'false' + gem 'webmock', :require => 'false' +end - # Puppet 4.x related lint checks - gem 'puppet-lint-unquoted_string-check' - gem 'puppet-lint-leading_zero-check' - gem 'puppet-lint-variable_contains_upcase' - gem 'puppet-lint-numericvariable' +group :system_tests do + gem 'beaker-rspec', :require => 'false' + gem 'beaker-puppet_install_helper', :require => 'false' + gem 'r10k', :require => 'false' +end - gem 'beaker-rspec', :require => false - gem 'beaker-puppet_install_helper', :require => false - gem 'json' - gem 'webmock' +if facterversion = ENV['FACTER_GEM_VERSION'] + gem 'facter', facterversion, :require => false +else + gem 'facter', :require => false end if puppetversion 
= ENV['PUPPET_GEM_VERSION'] diff --git a/gnocchi/README.md b/gnocchi/README.md index 5a00b70be..00ac0ed86 100644 --- a/gnocchi/README.md +++ b/gnocchi/README.md @@ -34,6 +34,36 @@ Implementation gnocchi is a combination of Puppet manifest and ruby code to delivery configuration and extra functionality through types and providers. +### Types + +#### gnocchi_config + +The `gnocchi_config` provider is a children of the ini_setting provider. It allows one to write an entry in the `/etc/gnocchi/gnocchi.conf` file. + +```puppet +gnocchi_config { 'DEFAULT/verbose' : + value => true, +} +``` + +This will write `verbose=true` in the `[DEFAULT]` section. + +##### name + +Section/setting name to manage from `gnocchi.conf` + +##### value + +The value of the setting to be defined. + +##### secret + +Whether to hide the value from Puppet logs. Defaults to `false`. + +##### ensure_absent_val + +If value is equal to ensure_absent_val then the resource will behave as if `ensure => absent` was specified. Defaults to `` + Limitations ----------- diff --git a/gnocchi/Rakefile b/gnocchi/Rakefile index 84c9a7046..3c3603e3c 100644 --- a/gnocchi/Rakefile +++ b/gnocchi/Rakefile @@ -1,9 +1,85 @@ require 'puppetlabs_spec_helper/rake_tasks' require 'puppet-lint/tasks/puppet-lint' +require 'puppet-syntax/tasks/puppet-syntax' +require 'json' -PuppetLint.configuration.fail_on_warnings = true -PuppetLint.configuration.send('disable_80chars') -PuppetLint.configuration.send('disable_class_parameter_defaults') +modname = JSON.parse(open('metadata.json').read)['name'].split('-')[1] -task(:default).clear -task :default => [:spec, :lint] +PuppetSyntax.exclude_paths ||= [] +PuppetSyntax.exclude_paths << "spec/fixtures/**/*" +PuppetSyntax.exclude_paths << "pkg/**/*" +PuppetSyntax.exclude_paths << "vendor/**/*" + +Rake::Task[:lint].clear +PuppetLint::RakeTask.new :lint do |config| + config.ignore_paths = ["spec/**/*.pp", "vendor/**/*.pp"] + config.fail_on_warnings = true + config.log_format = '%{path}:%{linenumber}:%{KIND}: %{message}' + config.disable_checks = ["80chars", "class_inherits_from_params_class", "class_parameter_defaults", "only_variable_string"] +end + +desc "Run acceptance tests" +RSpec::Core::RakeTask.new(:acceptance) do |t| + t.pattern = 'spec/acceptance' +end + +Rake::Task[:spec_prep].clear +desc 'Create the fixtures directory' +task :spec_prep do + # Allow to test the module with custom dependencies + # like you could do with .fixtures file + if ENV['PUPPETFILE'] + puppetfile = ENV['PUPPETFILE'] + if ENV['GEM_HOME'] + gem_home = ENV['GEM_HOME'] + gem_bin_dir = "#{gem_home}" + '/bin/' + else + gem_bin_dir = '' + end + r10k = ['env'] + r10k += ["PUPPETFILE=#{puppetfile}"] + r10k += ["PUPPETFILE_DIR=#{Dir.pwd}/spec/fixtures/modules"] + r10k += ["#{gem_bin_dir}r10k"] + r10k += ['puppetfile', 'install', '-v'] + sh(*r10k) + else + # otherwise, use official OpenStack Puppetfile + zuul_ref = ENV['ZUUL_REF'] + zuul_branch = ENV['ZUUL_BRANCH'] + zuul_url = ENV['ZUUL_URL'] + repo = 'openstack/puppet-openstack-integration' + rm_rf(repo) + if File.exists?('/usr/zuul-env/bin/zuul-cloner') + zuul_clone_cmd = ['/usr/zuul-env/bin/zuul-cloner'] + zuul_clone_cmd += ['--cache-dir', '/opt/git'] + zuul_clone_cmd += ['--zuul-ref', "#{zuul_ref}"] + zuul_clone_cmd += ['--zuul-branch', "#{zuul_branch}"] + zuul_clone_cmd += ['--zuul-url', "#{zuul_url}"] + zuul_clone_cmd += ['git://git.openstack.org', "#{repo}"] + sh(*zuul_clone_cmd) + else + sh("git clone https://git.openstack.org/#{repo} #{repo}") + end + script = ['env'] + script += 
["PUPPETFILE_DIR=#{Dir.pwd}/spec/fixtures/modules"] + script += ["ZUUL_REF=#{zuul_ref}"] + script += ["ZUUL_BRANCH=#{zuul_branch}"] + script += ["ZUUL_URL=#{zuul_url}"] + script += ['bash', "#{repo}/install_modules_unit.sh"] + sh(*script) + end + rm_rf("spec/fixtures/modules/#{modname}") + ln_s(Dir.pwd, "spec/fixtures/modules/#{modname}") + mkdir_p('spec/fixtures/manifests') + touch('spec/fixtures/manifests/site.pp') +end + +Rake::Task[:spec_clean].clear +desc 'Clean up the fixtures directory' +task :spec_clean do + rm_rf('spec/fixtures/modules') + rm_rf('openstack') + if File.zero?('spec/fixtures/manifests/site.pp') + rm_f('spec/fixtures/manifests/site.pp') + end +end diff --git a/gnocchi/lib/puppet/provider/gnocchi_config/ini_setting.rb b/gnocchi/lib/puppet/provider/gnocchi_config/ini_setting.rb index 0fee4b125..e4f5888aa 100644 --- a/gnocchi/lib/puppet/provider/gnocchi_config/ini_setting.rb +++ b/gnocchi/lib/puppet/provider/gnocchi_config/ini_setting.rb @@ -1,27 +1,10 @@ Puppet::Type.type(:gnocchi_config).provide( :ini_setting, - :parent => Puppet::Type.type(:ini_setting).provider(:ruby) + :parent => Puppet::Type.type(:openstack_config).provider(:ini_setting) ) do - def section - resource[:name].split('/', 2).first - end - - def setting - resource[:name].split('/', 2).last - end - - def separator - '=' - end - def self.file_path '/etc/gnocchi/gnocchi.conf' end - # added for backwards compatibility with older versions of inifile - def file_path - self.class.file_path - end - end diff --git a/gnocchi/lib/puppet/type/gnocchi_config.rb b/gnocchi/lib/puppet/type/gnocchi_config.rb index 9fbf10c0f..7c32cf25c 100644 --- a/gnocchi/lib/puppet/type/gnocchi_config.rb +++ b/gnocchi/lib/puppet/type/gnocchi_config.rb @@ -14,6 +14,7 @@ value.capitalize! if value =~ /^(true|false)$/i value end + newvalues(/^[\S ]*$/) def is_to_s( currentvalue ) if resource.secret? @@ -39,4 +40,14 @@ def should_to_s( newvalue ) defaultto false end + + newparam(:ensure_absent_val) do + desc 'A value that is specified as the value property will behave as if ensure => absent was specified' + defaultto('') + end + + autorequire(:package) do + 'gnocchi-api' + end + end diff --git a/gnocchi/manifests/api.pp b/gnocchi/manifests/api.pp index 86be53de3..bbb12a3f9 100644 --- a/gnocchi/manifests/api.pp +++ b/gnocchi/manifests/api.pp @@ -34,21 +34,33 @@ # # [*verbose*] # (optional) Rather to log the gnocchi api service at verbose level. -# Default: false +# Defaults to undef # # [*debug*] # (optional) Rather to log the gnocchi api service at debug level. -# Default: false +# Defaults to undef # # [*log_file*] # (optional) The path of file used for logging # If set to boolean false, it will not log to any file. -# Default: /var/log/gnocchi/gnocchi-api.log +# Defaults to undef # -# [*log_dir*] +# [*use_syslog*] +# (Optional) Use syslog for logging. +# Defaults to undef +# +# [*use_stderr*] +# (optional) Use stderr for logging +# Defaults to undef +# +# [*log_facility*] +# (Optional) Syslog facility to receive log lines. +# Defaults to undef +# +# [*log_dir*] # (optional) directory to which gnocchi logs are sent. # If set to boolean false, it will not log to any directory. -# Defaults to '/var/log/gnocchi' +# Defaults to undef # # [*keystone_tenant*] # (optional) Tenant to authenticate to. 
@@ -81,10 +93,13 @@ # class gnocchi::api( $keystone_password, - $verbose = false, - $debug = false, - $log_file = '/var/log/gnocchi/gnocchi-api.log', - $log_dir = '/var/log/gnocchi', + $verbose = undef, + $debug = undef, + $use_syslog = undef, + $use_stderr = undef, + $log_facility = undef, + $log_dir = undef, + $log_file = undef, $keystone_tenant = 'services', $keystone_user = 'gnocchi', $identity_uri = 'http://127.0.0.1:35357', @@ -97,11 +112,11 @@ ) inherits gnocchi { require ::keystone::python + include ::gnocchi::logging include ::gnocchi::params Gnocchi_config<||> ~> Exec['post-gnocchi_config'] Gnocchi_config<||> ~> Service['gnocchi-api'] - Package['gnocchi-api'] -> Gnocchi_config<||> if $::gnocchi::database_connection { if($::gnocchi::database_connection =~ /mysql:\/\/\S+:\S+@\S+\/\S+/) { @@ -122,47 +137,12 @@ # basic service config gnocchi_config { - 'DEFAULT/verbose': value => $verbose; - 'DEFAULT/debug': value => $debug; 'keystone_authtoken/identity_uri': value => $identity_uri; 'keystone_authtoken/admin_user': value => $keystone_user; 'keystone_authtoken/admin_password': value => $keystone_password, secret => true; 'keystone_authtoken/admin_tenant_name': value => $keystone_tenant; } - # Logging - if $log_file { - gnocchi_config { - 'DEFAULT/log_file': value => $log_file; - } - } else { - gnocchi_config { - 'DEFAULT/log_file': ensure => absent; - } - } - - if $log_dir { - gnocchi_config { - 'DEFAULT/log_dir': value => $log_dir; - } - } else { - gnocchi_config { - 'DEFAULT/log_dir': ensure => absent; - } - } - - # Syslog - if $use_syslog { - gnocchi_config { - 'DEFAULT/use_syslog' : value => true; - 'DEFAULT/syslog_log_facility' : value => $log_facility; - } - } else { - gnocchi_config { - 'DEFAULT/use_syslog': value => false; - } - } - resources { 'gnocchi_config': purge => $purge_config, } diff --git a/gnocchi/manifests/config.pp b/gnocchi/manifests/config.pp new file mode 100644 index 000000000..7e473ec9b --- /dev/null +++ b/gnocchi/manifests/config.pp @@ -0,0 +1,30 @@ +# == Class: gnocchi::config +# +# This class is used to manage arbitrary Gnocchi configurations. +# +# === Parameters +# +# [*gnocchi_config*] +# (optional) Allow configuration of arbitrary Gnocchi configurations. +# The value is an hash of gnocchi_config resources. Example: +# { 'DEFAULT/foo' => { value => 'fooValue'}, +# 'DEFAULT/bar' => { value => 'barValue'} +# } +# In yaml format, Example: +# gnocchi_config: +# DEFAULT/foo: +# value: fooValue +# DEFAULT/bar: +# value: barValue +# +# NOTE: The configuration MUST NOT be already handled by this module +# or Puppet catalog compilation will fail with duplicate resources. +# +class gnocchi::config ( + $gnocchi_config = {}, +) { + + validate_hash($gnocchi_config) + + create_resources('gnocchi_config', $gnocchi_config) +} diff --git a/gnocchi/manifests/generic_service.pp b/gnocchi/manifests/generic_service.pp index 9530cc151..9e0e898a0 100644 --- a/gnocchi/manifests/generic_service.pp +++ b/gnocchi/manifests/generic_service.pp @@ -68,6 +68,7 @@ ensure => $ensure_package, name => $package_name, notify => Service[$gnocchi_title], + tag => ['openstack', 'gnocchi-package'], } } } @@ -86,6 +87,7 @@ name => $service_name, enable => $enabled, hasstatus => true, + tag => 'gnocchi-service', } } } diff --git a/gnocchi/manifests/keystone/auth.pp b/gnocchi/manifests/keystone/auth.pp index cb010d9d6..129b0e253 100644 --- a/gnocchi/manifests/keystone/auth.pp +++ b/gnocchi/manifests/keystone/auth.pp @@ -45,6 +45,9 @@ # [*service_type*] # Type of service. Defaults to 'gnocchi'. 
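A short sketch of wiring the new `gnocchi::config` class from a profile; the option names below are placeholders, and as the class header warns they must not overlap with settings the module already manages:

```puppet
class { '::gnocchi::config':
  gnocchi_config => {
    'DEFAULT/notification_driver' => { 'value'  => 'messagingv2' },
    'DEFAULT/some_legacy_option'  => { 'ensure' => 'absent' },
  },
}
```

Each hash entry becomes one `gnocchi_config` resource through `create_resources`, so any attribute of the type (`value`, `ensure`, `secret`, `ensure_absent_val`) is accepted per key.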
# +# [*service_description*] +# Description for keystone service. Optional. Defaults to 'OpenStack Datapoint Service'. +# # [*public_protocol*] # Protocol for public endpoint. Defaults to 'http'. # @@ -88,6 +91,7 @@ $configure_user = true, $configure_user_role = true, $service_type = 'gnocchi', + $service_description = 'OpenStack Datapoint Service', $public_protocol = 'http', $public_address = '127.0.0.1', $public_port = '8041', @@ -108,7 +112,7 @@ configure_user_role => true, configure_endpoint => $configure_endpoint, service_type => $service_type, - service_description => 'OpenStack Datapoint Service', + service_description => $service_description, region => $region, password => $password, email => $email, diff --git a/gnocchi/manifests/logging.pp b/gnocchi/manifests/logging.pp new file mode 100644 index 000000000..38586ab2e --- /dev/null +++ b/gnocchi/manifests/logging.pp @@ -0,0 +1,265 @@ +# Class gnocchi::logging +# +# gnocchi logging configuration +# +# == parameters +# +# [*verbose*] +# (Optional) Should the daemons log verbose messages +# Defaults to 'false' +# +# [*debug*] +# (Optional) Should the daemons log debug messages +# Defaults to 'false' +# +# [*use_syslog*] +# (Optional) Use syslog for logging. +# Defaults to 'false' +# +# [*use_stderr*] +# (optional) Use stderr for logging +# Defaults to 'true' +# +# [*log_facility*] +# (Optional) Syslog facility to receive log lines. +# Defaults to 'LOG_USER' +# +# [*log_dir*] +# (optional) Directory where logs should be stored. +# If set to boolean false, it will not log to any directory. +# Defaults to '/var/log/gnocchi' +# +# [*log_file*] +# (optional) The path of file used for logging +# If set to boolean false, it will not log to any file. +# Default to '/var/log/gnocchi-api.log' +# +# [*logging_context_format_string*] +# (optional) Format string to use for log messages with context. +# Defaults to undef. +# Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ +# [%(request_id)s %(user_identity)s] %(instance)s%(message)s' +# +# [*logging_default_format_string*] +# (optional) Format string to use for log messages without context. +# Defaults to undef. +# Example: '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s\ +# [-] %(instance)s%(message)s' +# +# [*logging_debug_format_suffix*] +# (optional) Formatted data to append to log format when level is DEBUG. +# Defaults to undef. +# Example: '%(funcName)s %(pathname)s:%(lineno)d' +# +# [*logging_exception_prefix*] +# (optional) Prefix each line of exception output with this format. +# Defaults to undef. +# Example: '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s' +# +# [*log_config_append*] +# The name of an additional logging configuration file. +# Defaults to undef. +# See https://docs.python.org/2/howto/logging.html +# +# [*default_log_levels*] +# (optional) Hash of logger (keys) and level (values) pairs. +# Defaults to undef. +# Example: +# { 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', +# 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', +# 'iso8601' => 'WARN', +# 'requests.packages.urllib3.connectionpool' => 'WARN' } +# +# [*publish_errors*] +# (optional) Publish error events (boolean value). +# Defaults to undef (false if unconfigured). +# +# [*fatal_deprecations*] +# (optional) Make deprecations fatal (boolean value) +# Defaults to undef (false if unconfigured). +# +# [*instance_format*] +# (optional) If an instance is passed with the log message, format it +# like this (string value). +# Defaults to undef. 
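As a usage sketch for the new `gnocchi::logging` class documented above: the values are illustrative, and when `gnocchi::api` is also in the catalog it already includes this class, so in that case these parameters would normally be supplied through Hiera or through `gnocchi::api` itself rather than a second resource-like declaration.

```puppet
class { '::gnocchi::logging':
  verbose            => true,
  use_syslog         => true,
  log_facility       => 'LOG_LOCAL0',
  log_dir            => '/var/log/gnocchi',
  log_file           => '/var/log/gnocchi/gnocchi-api.log',
  # Rendered as DEFAULT/default_log_levels =
  #   'amqp=WARN,iso8601=WARN,sqlalchemy=WARN' (keys sorted, joined with '=').
  default_log_levels => {
    'amqp'       => 'WARN',
    'sqlalchemy' => 'WARN',
    'iso8601'    => 'WARN',
  },
}
```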
+# Example: '[instance: %(uuid)s] ' +# +# [*instance_uuid_format*] +# (optional) If an instance UUID is passed with the log message, format +# it like this (string value). +# Defaults to undef. +# Example: instance_uuid_format='[instance: %(uuid)s] ' + +# [*log_date_format*] +# (optional) Format string for %%(asctime)s in log records. +# Defaults to undef. +# Example: 'Y-%m-%d %H:%M:%S' + +class gnocchi::logging( + $use_syslog = false, + $use_stderr = true, + $log_facility = 'LOG_USER', + $log_dir = '/var/log/gnocchi', + $log_file = '/var/log/gnocchi/gnocchi-api.log', + $verbose = false, + $debug = false, + $logging_context_format_string = undef, + $logging_default_format_string = undef, + $logging_debug_format_suffix = undef, + $logging_exception_prefix = undef, + $log_config_append = undef, + $default_log_levels = undef, + $publish_errors = undef, + $fatal_deprecations = undef, + $instance_format = undef, + $instance_uuid_format = undef, + $log_date_format = undef, +) { + + # NOTE(spredzy): In order to keep backward compatibility we rely on the pick function + # to use gnocchi:: first then gnocchi::logging::. + $use_syslog_real = pick($::gnocchi::api::use_syslog,$use_syslog) + $use_stderr_real = pick($::gnocchi::api::use_stderr,$use_stderr) + $log_facility_real = pick($::gnocchi::api::log_facility,$log_facility) + $log_dir_real = pick($::gnocchi::api::log_dir,$log_dir) + $log_file_real = pick($::gnocchi::api::log_file,$log_file) + $verbose_real = pick($::gnocchi::api::verbose,$verbose) + $debug_real = pick($::gnocchi::api::debug,$debug) + + gnocchi_config { + 'DEFAULT/debug' : value => $debug_real; + 'DEFAULT/verbose' : value => $verbose_real; + 'DEFAULT/use_stderr' : value => $use_stderr_real; + 'DEFAULT/use_syslog' : value => $use_syslog_real; + 'DEFAULT/log_dir' : value => $log_dir_real; + 'DEFAULT/log_file' : value => $log_file_real; + 'DEFAULT/syslog_log_facility': value => $log_facility_real; + } + + if $logging_context_format_string { + gnocchi_config { + 'DEFAULT/logging_context_format_string' : + value => $logging_context_format_string; + } + } + else { + gnocchi_config { + 'DEFAULT/logging_context_format_string' : ensure => absent; + } + } + + if $logging_default_format_string { + gnocchi_config { + 'DEFAULT/logging_default_format_string' : + value => $logging_default_format_string; + } + } + else { + gnocchi_config { + 'DEFAULT/logging_default_format_string' : ensure => absent; + } + } + + if $logging_debug_format_suffix { + gnocchi_config { + 'DEFAULT/logging_debug_format_suffix' : + value => $logging_debug_format_suffix; + } + } + else { + gnocchi_config { + 'DEFAULT/logging_debug_format_suffix' : ensure => absent; + } + } + + if $logging_exception_prefix { + gnocchi_config { + 'DEFAULT/logging_exception_prefix' : value => $logging_exception_prefix; + } + } + else { + gnocchi_config { + 'DEFAULT/logging_exception_prefix' : ensure => absent; + } + } + + if $log_config_append { + gnocchi_config { + 'DEFAULT/log_config_append' : value => $log_config_append; + } + } + else { + gnocchi_config { + 'DEFAULT/log_config_append' : ensure => absent; + } + } + + if $default_log_levels { + gnocchi_config { + 'DEFAULT/default_log_levels' : + value => join(sort(join_keys_to_values($default_log_levels, '=')), ','); + } + } + else { + gnocchi_config { + 'DEFAULT/default_log_levels' : ensure => absent; + } + } + + if $publish_errors { + gnocchi_config { + 'DEFAULT/publish_errors' : value => $publish_errors; + } + } + else { + gnocchi_config { + 'DEFAULT/publish_errors' : ensure => absent; + } 
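The `pick()` calls above give parameters passed to `gnocchi::api` precedence over `gnocchi::logging`, which keeps the old interface working. A minimal sketch of that backward-compatible path (password and facility values are placeholders):

```puppet
# debug, use_syslog and log_facility are forwarded to gnocchi::logging via
# pick(); anything left undef here falls back to gnocchi::logging's own
# defaults (for example log_dir => '/var/log/gnocchi').
class { '::gnocchi::api':
  keystone_password => 'a_big_secret',
  debug             => true,
  use_syslog        => true,
  log_facility      => 'LOG_LOCAL0',
}
```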
+ } + + if $fatal_deprecations { + gnocchi_config { + 'DEFAULT/fatal_deprecations' : value => $fatal_deprecations; + } + } + else { + gnocchi_config { + 'DEFAULT/fatal_deprecations' : ensure => absent; + } + } + + if $instance_format { + gnocchi_config { + 'DEFAULT/instance_format' : value => $instance_format; + } + } + else { + gnocchi_config { + 'DEFAULT/instance_format' : ensure => absent; + } + } + + if $instance_uuid_format { + gnocchi_config { + 'DEFAULT/instance_uuid_format' : value => $instance_uuid_format; + } + } + else { + gnocchi_config { + 'DEFAULT/instance_uuid_format' : ensure => absent; + } + } + + if $log_date_format { + gnocchi_config { + 'DEFAULT/log_date_format' : value => $log_date_format; + } + } + else { + gnocchi_config { + 'DEFAULT/log_date_format' : ensure => absent; + } + } + + +} diff --git a/gnocchi/spec/acceptance/nodesets/centos-70-x64.yml b/gnocchi/spec/acceptance/nodesets/centos-70-x64.yml new file mode 100644 index 000000000..5f097e9fe --- /dev/null +++ b/gnocchi/spec/acceptance/nodesets/centos-70-x64.yml @@ -0,0 +1,11 @@ +HOSTS: + centos-server-70-x64: + roles: + - master + platform: el-7-x86_64 + box: puppetlabs/centos-7.0-64-nocm + box_url: https://vagrantcloud.com/puppetlabs/centos-7.0-64-nocm + hypervisor: vagrant +CONFIG: + log_level: debug + type: foss diff --git a/gnocchi/spec/acceptance/nodesets/default.yml b/gnocchi/spec/acceptance/nodesets/default.yml index cba1cd04c..486b6a34e 100644 --- a/gnocchi/spec/acceptance/nodesets/default.yml +++ b/gnocchi/spec/acceptance/nodesets/default.yml @@ -1,11 +1,10 @@ HOSTS: - ubuntu-server-1404-x64: + ubuntu-server-14.04-amd64: roles: - master platform: ubuntu-14.04-amd64 - box : puppetlabs/ubuntu-14.04-64-nocm - box_url : https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm - hypervisor : vagrant + box: puppetlabs/ubuntu-14.04-64-nocm + box_url: https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm + hypervisor: vagrant CONFIG: - log_level : debug - type: git + type: foss diff --git a/gnocchi/spec/acceptance/nodesets/nodepool-centos7.yml b/gnocchi/spec/acceptance/nodesets/nodepool-centos7.yml index 575ae6732..c55287420 100644 --- a/gnocchi/spec/acceptance/nodesets/nodepool-centos7.yml +++ b/gnocchi/spec/acceptance/nodesets/nodepool-centos7.yml @@ -3,7 +3,7 @@ HOSTS: roles: - master platform: el-7-x86_64 - hypervisor : none + hypervisor: none ip: 127.0.0.1 CONFIG: type: foss diff --git a/gnocchi/spec/acceptance/nodesets/nodepool-trusty.yml b/gnocchi/spec/acceptance/nodesets/nodepool-trusty.yml index a95d9f38d..9fc624e24 100644 --- a/gnocchi/spec/acceptance/nodesets/nodepool-trusty.yml +++ b/gnocchi/spec/acceptance/nodesets/nodepool-trusty.yml @@ -3,7 +3,7 @@ HOSTS: roles: - master platform: ubuntu-14.04-amd64 - hypervisor : none + hypervisor: none ip: 127.0.0.1 CONFIG: type: foss diff --git a/gnocchi/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml b/gnocchi/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml index cba1cd04c..8001929b2 100644 --- a/gnocchi/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml +++ b/gnocchi/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml @@ -1,11 +1,11 @@ HOSTS: - ubuntu-server-1404-x64: + ubuntu-server-14.04-amd64: roles: - master platform: ubuntu-14.04-amd64 - box : puppetlabs/ubuntu-14.04-64-nocm - box_url : https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm - hypervisor : vagrant + box: puppetlabs/ubuntu-14.04-64-nocm + box_url: https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm + hypervisor: vagrant CONFIG: - log_level : debug - type: git + 
log_level: debug + type: foss diff --git a/gnocchi/spec/classes/gnocchi_api_spec.rb b/gnocchi/spec/classes/gnocchi_api_spec.rb index 244c875e3..fd1e04022 100644 --- a/gnocchi/spec/classes/gnocchi_api_spec.rb +++ b/gnocchi/spec/classes/gnocchi_api_spec.rb @@ -17,6 +17,10 @@ context 'with default parameters' do + it 'contains the logging class' do + is_expected.to contain_class('gnocchi::logging') + end + it 'installs gnocchi-api package and service' do is_expected.to contain_service('gnocchi-api').with( :name => platform_params[:api_service_name], @@ -32,8 +36,6 @@ end it 'configures gnocchi-api with default parameters' do - is_expected.to contain_gnocchi_config('DEFAULT/verbose').with_value(false) - is_expected.to contain_gnocchi_config('DEFAULT/debug').with_value(false) is_expected.to contain_gnocchi_config('keystone_authtoken/identity_uri').with_value(params[:identity_uri]) is_expected.to contain_gnocchi_config('keystone_authtoken/admin_tenant_name').with_value(params[:keystone_tenant]) is_expected.to contain_gnocchi_config('keystone_authtoken/admin_user').with_value(params[:keystone_user]) diff --git a/gnocchi/spec/classes/gnocchi_config_spec.rb b/gnocchi/spec/classes/gnocchi_config_spec.rb new file mode 100644 index 000000000..edf53aeb8 --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_config_spec.rb @@ -0,0 +1,20 @@ +require 'spec_helper' + +describe 'gnocchi::config' do + + let :params do + { :gnocchi_config => { + 'DEFAULT/foo' => { 'value' => 'fooValue' }, + 'DEFAULT/bar' => { 'value' => 'barValue' }, + 'DEFAULT/baz' => { 'ensure' => 'absent' } + } + } + end + + it 'configures arbitrary gnocchi configurations' do + is_expected.to contain_gnocchi_config('DEFAULT/foo').with_value('fooValue') + is_expected.to contain_gnocchi_config('DEFAULT/bar').with_value('barValue') + is_expected.to contain_gnocchi_config('DEFAULT/baz').with_ensure('absent') + end + +end diff --git a/gnocchi/spec/classes/gnocchi_logging_spec.rb b/gnocchi/spec/classes/gnocchi_logging_spec.rb new file mode 100644 index 000000000..6a4c3e4a5 --- /dev/null +++ b/gnocchi/spec/classes/gnocchi_logging_spec.rb @@ -0,0 +1,146 @@ +require 'spec_helper' + +describe 'gnocchi::logging' do + + let :params do + { + } + end + + let :log_params do + { + :logging_context_format_string => '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s', + :logging_default_format_string => '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s', + :logging_debug_format_suffix => '%(funcName)s %(pathname)s:%(lineno)d', + :logging_exception_prefix => '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s', + :log_config_append => '/etc/gnocchi/logging.conf', + :publish_errors => true, + :default_log_levels => { + 'amqp' => 'WARN', 'amqplib' => 'WARN', 'boto' => 'WARN', + 'qpid' => 'WARN', 'sqlalchemy' => 'WARN', 'suds' => 'INFO', + 'iso8601' => 'WARN', + 'requests.packages.urllib3.connectionpool' => 'WARN' }, + :fatal_deprecations => true, + :instance_format => '[instance: %(uuid)s] ', + :instance_uuid_format => '[instance: %(uuid)s] ', + :log_date_format => '%Y-%m-%d %H:%M:%S', + :use_syslog => true, + :use_stderr => false, + :log_facility => 'LOG_FOO', + :log_dir => '/var/log', + :log_file => '/var/log/foo.log', + :verbose => true, + :debug => true, + } + end + + shared_examples_for 'gnocchi-logging' do + + context 'with basic logging options and default settings' do + it_configures 'basic default logging settings' + end + + context 'with 
basic logging options and non-default settings' do + before { params.merge!( log_params ) } + it_configures 'basic non-default logging settings' + end + + context 'with extended logging options' do + before { params.merge!( log_params ) } + it_configures 'logging params set' + end + + context 'without extended logging options' do + it_configures 'logging params unset' + end + + end + + shared_examples 'basic default logging settings' do + it 'configures gnocchi logging settins with default values' do + is_expected.to contain_gnocchi_config('DEFAULT/use_syslog').with(:value => 'false') + is_expected.to contain_gnocchi_config('DEFAULT/use_stderr').with(:value => 'true') + is_expected.to contain_gnocchi_config('DEFAULT/log_dir').with(:value => '/var/log/gnocchi') + is_expected.to contain_gnocchi_config('DEFAULT/log_file').with(:value => '/var/log/gnocchi/gnocchi-api.log') + is_expected.to contain_gnocchi_config('DEFAULT/verbose').with(:value => 'false') + is_expected.to contain_gnocchi_config('DEFAULT/debug').with(:value => 'false') + end + end + + shared_examples 'basic non-default logging settings' do + it 'configures gnocchi logging settins with non-default values' do + is_expected.to contain_gnocchi_config('DEFAULT/use_syslog').with(:value => 'true') + is_expected.to contain_gnocchi_config('DEFAULT/use_stderr').with(:value => 'false') + is_expected.to contain_gnocchi_config('DEFAULT/syslog_log_facility').with(:value => 'LOG_FOO') + is_expected.to contain_gnocchi_config('DEFAULT/log_dir').with(:value => '/var/log') + is_expected.to contain_gnocchi_config('DEFAULT/log_file').with(:value => '/var/log/foo.log') + is_expected.to contain_gnocchi_config('DEFAULT/verbose').with(:value => 'true') + is_expected.to contain_gnocchi_config('DEFAULT/debug').with(:value => 'true') + end + end + + shared_examples_for 'logging params set' do + it 'enables logging params' do + is_expected.to contain_gnocchi_config('DEFAULT/logging_context_format_string').with_value( + '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s') + + is_expected.to contain_gnocchi_config('DEFAULT/logging_default_format_string').with_value( + '%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s') + + is_expected.to contain_gnocchi_config('DEFAULT/logging_debug_format_suffix').with_value( + '%(funcName)s %(pathname)s:%(lineno)d') + + is_expected.to contain_gnocchi_config('DEFAULT/logging_exception_prefix').with_value( + '%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s') + + is_expected.to contain_gnocchi_config('DEFAULT/log_config_append').with_value( + '/etc/gnocchi/logging.conf') + is_expected.to contain_gnocchi_config('DEFAULT/publish_errors').with_value( + true) + + is_expected.to contain_gnocchi_config('DEFAULT/default_log_levels').with_value( + 'amqp=WARN,amqplib=WARN,boto=WARN,iso8601=WARN,qpid=WARN,requests.packages.urllib3.connectionpool=WARN,sqlalchemy=WARN,suds=INFO') + + is_expected.to contain_gnocchi_config('DEFAULT/fatal_deprecations').with_value( + true) + + is_expected.to contain_gnocchi_config('DEFAULT/instance_format').with_value( + '[instance: %(uuid)s] ') + + is_expected.to contain_gnocchi_config('DEFAULT/instance_uuid_format').with_value( + '[instance: %(uuid)s] ') + + is_expected.to contain_gnocchi_config('DEFAULT/log_date_format').with_value( + '%Y-%m-%d %H:%M:%S') + end + end + + + shared_examples_for 'logging params unset' do + [ :logging_context_format_string, :logging_default_format_string, 
+ :logging_debug_format_suffix, :logging_exception_prefix, + :log_config_append, :publish_errors, + :default_log_levels, :fatal_deprecations, + :instance_format, :instance_uuid_format, + :log_date_format, ].each { |param| + it { is_expected.to contain_gnocchi_config("DEFAULT/#{param}").with_ensure('absent') } + } + end + + context 'on Debian platforms' do + let :facts do + { :osfamily => 'Debian' } + end + + it_configures 'gnocchi-logging' + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 'RedHat' } + end + + it_configures 'gnocchi-logging' + end + +end diff --git a/gnocchi/spec/shared_examples.rb b/gnocchi/spec/shared_examples.rb index 4bc52a928..a888e4622 100644 --- a/gnocchi/spec/shared_examples.rb +++ b/gnocchi/spec/shared_examples.rb @@ -11,13 +11,15 @@ is_expected.to contain_package(service[:name]).with({ :name => service[:package_name], :ensure => 'present', - :notify => "Service[#{service[:name]}]" + :notify => "Service[#{service[:name]}]", + :tag => ['openstack', 'gnocchi-package'], }) is_expected.to contain_service(service[:name]).with({ :name => service[:service_name], :ensure => 'stopped', :hasstatus => true, - :enable => false + :enable => false, + :tag => 'gnocchi-service', }) end end @@ -32,13 +34,15 @@ is_expected.to contain_package(service[:name]).with({ :name => service[:package_name], :ensure => '2014.1-1', - :notify => "Service[#{service[:name]}]" + :notify => "Service[#{service[:name]}]", + :tag => ['openstack', 'gnocchi-package'], }) is_expected.to contain_service(service[:name]).with({ :name => service[:service_name], :ensure => 'running', :hasstatus => true, - :enable => true + :enable => true, + :tag => 'gnocchi-service', }) end end diff --git a/gnocchi/spec/spec_helper.rb b/gnocchi/spec/spec_helper.rb index 53d4dd02d..3df4cede1 100644 --- a/gnocchi/spec/spec_helper.rb +++ b/gnocchi/spec/spec_helper.rb @@ -1,7 +1,10 @@ require 'puppetlabs_spec_helper/module_spec_helper' require 'shared_examples' +require 'webmock/rspec' RSpec.configure do |c| c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' end + +at_exit { RSpec::Puppet::Coverage.report! } diff --git a/gnocchi/spec/unit/provider/gnocchi_config/ini_setting_spec.rb b/gnocchi/spec/unit/provider/gnocchi_config/ini_setting_spec.rb index 059a5a2f5..a3fbe1cbb 100644 --- a/gnocchi/spec/unit/provider/gnocchi_config/ini_setting_spec.rb +++ b/gnocchi/spec/unit/provider/gnocchi_config/ini_setting_spec.rb @@ -13,6 +13,17 @@ 'inifile', 'lib') ) +$LOAD_PATH.push( + File.join( + File.dirname(__FILE__), + '..', + '..', + '..', + 'fixtures', + 'modules', + 'openstacklib', + 'lib') +) require 'spec_helper' provider_class = Puppet::Type.type(:gnocchi_config).provider(:ini_setting) describe provider_class do @@ -34,4 +45,22 @@ expect(provider.section).to eq('dude') expect(provider.setting).to eq('foo') end + + it 'should ensure absent when is specified as a value' do + resource = Puppet::Type::Gnocchi_config.new( + {:name => 'dude/foo', :value => ''} + ) + provider = provider_class.new(resource) + provider.exists? + expect(resource[:ensure]).to eq :absent + end + + it 'should ensure absent when value matches ensure_absent_val' do + resource = Puppet::Type::Gnocchi_config.new( + {:name => 'dude/foo', :value => 'foo', :ensure_absent_val => 'foo' } + ) + provider = provider_class.new(resource) + provider.exists? 
+ expect(resource[:ensure]).to eq :absent + end end diff --git a/gnocchi/spec/unit/type/gnocchi_config_spec.rb b/gnocchi/spec/unit/type/gnocchi_config_spec.rb index d711b0345..64580fe4c 100644 --- a/gnocchi/spec/unit/type/gnocchi_config_spec.rb +++ b/gnocchi/spec/unit/type/gnocchi_config_spec.rb @@ -49,4 +49,15 @@ @gnocchi_config[:ensure] = :latest }.to raise_error(Puppet::Error, /Invalid value/) end + + it 'should autorequire the package that install the file' do + catalog = Puppet::Resource::Catalog.new + package = Puppet::Type.type(:package).new(:name => 'gnocchi-api') + catalog.add_resource package, @gnocchi_config + dependency = @gnocchi_config.autorequire + expect(dependency.size).to eq(1) + expect(dependency[0].target).to eq(@gnocchi_config) + expect(dependency[0].source).to eq(package) + end + end diff --git a/heat/CHANGELOG.md b/heat/CHANGELOG.md index fd37e6d33..131c7b961 100644 --- a/heat/CHANGELOG.md +++ b/heat/CHANGELOG.md @@ -1,3 +1,36 @@ +##2015-07-08 - 6.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Kilo. + +####Backwards-incompatible changes +- Move rabbit/kombu settings to oslo_messaging_rabbit section + +####Features +- Puppet 4.x support +- Implement Keystone domain creation +- Log output of heat-keystone-setup-domain +- Refactorise Keystone resources management +- Move keystone role creation to keystone area +- Support region_name for Heat +- Mark heat's keystone password as secret +- Add support for identity_uri +- Make configuring the service optional +- Set instance_user in heat +- Added missing enable_stack_abandon configuration option +- Tag all Heat packages +- Create a sync_db boolean for Heat +- Engine: validate auth_encryption_key +- Allow setting default config/signal transport +- Run db_sync when heat-common is upgraded +- Introduce public_url, internal_url and admin_url + +####Maintenance +- Acceptance tests with Beaker +- Fix spec tests for RSpec 3.x and Puppet 4.x +- Rename keystone_v2_authenticate method +- Make package_ensure consistent across classes + ##2015-06-17 - 5.1.0 ###Summary diff --git a/heat/README.md b/heat/README.md index b75cd196f..246c4b6d7 100644 --- a/heat/README.md +++ b/heat/README.md @@ -1,7 +1,7 @@ puppet-heat ============= -5.1.0 - 2014.2 - Juno +6.0.0 - 2015.1 - Kilo #### Table of Contents diff --git a/heat/lib/puppet/provider/heat_domain_id_setter/ruby.rb b/heat/lib/puppet/provider/heat_domain_id_setter/ruby.rb deleted file mode 100644 index 12ce71acf..000000000 --- a/heat/lib/puppet/provider/heat_domain_id_setter/ruby.rb +++ /dev/null @@ -1,189 +0,0 @@ -## NB: This must work with Ruby 1.8! - -# This provider permits the stack_user_domain parameter in heat.conf -# to be set by providing a domain_name to the Puppet module and -# using the Keystone REST API to translate the name into the corresponding -# UUID. -# -# This requires that tenant names be unique. If there are multiple matches -# for a given tenant name, this provider will raise an exception. - -require 'rubygems' -require 'net/http' -require 'json' - -class KeystoneError < Puppet::Error -end - -class KeystoneConnectionError < KeystoneError -end - -class KeystoneAPIError < KeystoneError -end - -# Provides common request handling semantics to the other methods in -# this module. -# -# +req+:: -# An HTTPRequest object -# +url+:: -# A parsed URL (returned from URI.parse) -def handle_request(req, url) - begin - # There is issue with ipv6 where address has to be in brackets, this causes the - # underlying ruby TCPSocket to fail. 
Net::HTTP.new will fail without brackets on - # joining the ipv6 address with :port or passing brackets to TCPSocket. It was - # found that if we use Net::HTTP.start with url.hostname the incriminated code - # won't be hit. - use_ssl = url.scheme == "https" ? true : false - res = Net::HTTP.start(url.hostname, url.port, {:use_ssl => use_ssl}) {|http| - http.request(req) - } - - if res.code != '200' - raise KeystoneAPIError, "Received error response from Keystone server at #{url}: #{res.message}" - end - rescue Errno::ECONNREFUSED => detail - raise KeystoneConnectionError, "Failed to connect to Keystone server at #{url}: #{detail}" - rescue SocketError => detail - raise KeystoneConnectionError, "Failed to connect to Keystone server at #{url}: #{detail}" - end - - res -end - -# Authenticates to a Keystone server and obtains an authentication token. -# It returns a 2-element +[token, authinfo]+, where +token+ is a token -# suitable for passing to openstack apis in the +X-Auth-Token+ header, and -# +authinfo+ is the complete response from Keystone, including the service -# catalog (if available). -# -# +auth_url+:: -# Keystone endpoint URL. This function assumes API version -# 2.0 and an administrative endpoint, so this will typically look like -# +http://somehost:35357/v2.0+. -# -# +username+:: -# Username for authentication. -# -# +password+:: -# Password for authentication -# -# +tenantID+:: -# Tenant UUID -# -# +tenantName+:: -# Tenant name -# -def heat_handle_requests(auth_url, - username, - password, - tenantId=nil, - tenantName=nil) - - post_args = { - 'auth' => { - 'passwordCredentials' => { - 'username' => username, - 'password' => password - }, - }} - - if tenantId - post_args['auth']['tenantId'] = tenantId - end - - if tenantName - post_args['auth']['tenantName'] = tenantName - end - - url = URI.parse("#{auth_url}/tokens") - req = Net::HTTP::Post.new url.path - req['content-type'] = 'application/json' - req.body = post_args.to_json - - res = handle_request(req, url) - data = JSON.parse res.body - return data['access']['token']['id'], data -end - -# Queries a Keystone server to a list of all tenants. -# -# +auth_url+:: -# Keystone endpoint. See the notes for +auth_url+ in -# +heat_handle_requests+. -# -# +token+:: -# A Keystone token that will be passed in requests as the value of the -# +X-Auth-Token+ header. -# -def keystone_v3_domains(auth_url, - token) - - auth_url.sub!('v2.0', 'v3') - url = URI.parse("#{auth_url}/domains") - req = Net::HTTP::Get.new url.path - req['content-type'] = 'application/json' - req['x-auth-token'] = token - - res = handle_request(req, url) - data = JSON.parse res.body - data['domains'] -end - -Puppet::Type.type(:heat_domain_id_setter).provide(:ruby) do - def authenticate - token, authinfo = heat_handle_requests( - @resource[:auth_url], - @resource[:auth_username], - @resource[:auth_password], - nil, - @resource[:auth_tenant_name]) - - return token - end - - def find_domain_by_name(token) - domains = keystone_v3_domains( - @resource[:auth_url], - token) - domains.select{|domain| domain['name'] == @resource[:domain_name]} - end - - def exists? - false - end - - def create - config - end - - # This looks for the domain specified by the 'domain_name' parameter to - # the resource and returns the corresponding UUID if there is a single - # match. 
- # - # Raises a KeystoneAPIError if: - # - # - There are multiple matches, or - # - There are zero matches - def get_domain_id - token = authenticate - domains = find_domain_by_name(token) - - if domains.length == 1 - return domains[0]['id'] - elsif domains.length > 1 - name = domains[0]['name'] - raise KeystoneAPIError, 'Found multiple matches for domain name "#{name}"' - else - raise KeystoneAPIError, 'Unable to find matching domain' - end - end - - def config - Puppet::Type.type(:heat_config).new( - {:name => 'DEFAULT/stack_user_domain', :value => "#{get_domain_id}"} - ).create - end - -end diff --git a/heat/lib/puppet/type/heat_domain_id_setter.rb b/heat/lib/puppet/type/heat_domain_id_setter.rb deleted file mode 100644 index d6e1eeef0..000000000 --- a/heat/lib/puppet/type/heat_domain_id_setter.rb +++ /dev/null @@ -1,31 +0,0 @@ -Puppet::Type.newtype(:heat_domain_id_setter) do - - ensurable - - newparam(:name, :namevar => true) do - desc 'The name of the setting to update' - end - - newparam(:domain_name) do - desc 'The heat domain name' - end - - newparam(:auth_url) do - desc 'The Keystone endpoint URL' - defaultto 'http://localhost:35357/v2.0' - end - - newparam(:auth_username) do - desc 'Username with which to authenticate' - defaultto 'admin' - end - - newparam(:auth_password) do - desc 'Password with which to authenticate' - end - - newparam(:auth_tenant_name) do - desc 'Tenant name with which to authenticate' - defaultto 'admin' - end -end diff --git a/heat/manifests/init.pp b/heat/manifests/init.pp index b6a7327df..76c002ead 100644 --- a/heat/manifests/init.pp +++ b/heat/manifests/init.pp @@ -25,6 +25,10 @@ # (Optional) Use these options to configure the RabbitMQ message system. # Defaults to 'heat.openstack.common.rpc.impl_kombu' # +# [*rpc_response_timeout*] +# (Optional) Configure the timeout (in seconds) for rpc responses +# Defaults to 60 seconds +# # [*rabbit_host*] # (Optional) IP or hostname of the rabbit server. # Defaults to '127.0.0.1' @@ -50,6 +54,21 @@ # (Optional) Virtual_host to use. # Defaults to '/' # +# [*rabbit_heartbeat_timeout_threshold*] +# (optional) Number of seconds after which the RabbitMQ broker is considered +# down if the heartbeat keepalive fails. Any value >0 enables heartbeats. +# Heartbeating helps to ensure the TCP connection to RabbitMQ isn't silently +# closed, resulting in missed or lost messages from the queue. +# (Requires kombu >= 3.0.7 and amqp >= 1.4.0) +# Defaults to 0 +# +# [*rabbit_heartbeat_rate*] +# (optional) How often during the rabbit_heartbeat_timeout_threshold period to +# check the heartbeat on RabbitMQ connection. (i.e. rabbit_heartbeat_rate=2 +# when rabbit_heartbeat_timeout_threshold=60, the heartbeat will be checked +# every 30 seconds. +# Defaults to 2 +# # [*rabbit_use_ssl*] # (Optional) Connect over SSL for RabbitMQ. # Defaults to false @@ -154,7 +173,9 @@ # [*instance_user*] # (Optional) The default user for new instances. Although heat claims that # this feature is deprecated, it still sets the users to ec2-user if -# you leave this unset. This will likely be deprecated in K or L. +# you leave this unset. If you want heat to not set instance_user to +# ec2-user, you need to set this to an empty string. This feature has been +# deprecated for some time and will likely be removed in L or M. # # [*enable_stack_adopt*] # (Optional) Enable the stack-adopt feature. @@ -189,58 +210,60 @@ # Defaults to http. 
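A hedged sketch of the new RabbitMQ heartbeat knobs on the `heat` class; the host and credentials below are placeholders, and the numbers are examples rather than recommendations:

```puppet
class { '::heat':
  rabbit_host                        => 'rabbit.example.com',
  rabbit_userid                      => 'heat',
  rabbit_password                    => 'an_example_password',
  # Any value > 0 enables heartbeats: the broker is considered down after
  # 60s without a heartbeat, checked every 60 / 2 = 30 seconds. Written to
  # oslo_messaging_rabbit/heartbeat_timeout_threshold and heartbeat_rate.
  rabbit_heartbeat_timeout_threshold => 60,
  rabbit_heartbeat_rate              => 2,
}
```

Separately, the reworked `instance_user` handling further down means `instance_user => ''` is now written out as an explicit empty value instead of being treated as unset and falling back to heat's `ec2-user` default.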
# class heat( - $auth_uri = false, - $identity_uri = false, - $package_ensure = 'present', - $verbose = false, - $debug = false, - $log_dir = '/var/log/heat', - $keystone_user = 'heat', - $keystone_tenant = 'services', - $keystone_password = false, - $keystone_ec2_uri = 'http://127.0.0.1:5000/v2.0/ec2tokens', - $rpc_backend = 'heat.openstack.common.rpc.impl_kombu', - $rabbit_host = '127.0.0.1', - $rabbit_port = 5672, - $rabbit_hosts = undef, - $rabbit_userid = 'guest', - $rabbit_password = '', - $rabbit_virtual_host = '/', - $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_version = 'TLSv1', - $amqp_durable_queues = false, - $qpid_hostname = 'localhost', - $qpid_port = 5672, - $qpid_username = 'guest', - $qpid_password = 'guest', - $qpid_heartbeat = 60, - $qpid_protocol = 'tcp', - $qpid_tcp_nodelay = true, - $qpid_reconnect = true, - $qpid_reconnect_timeout = 0, - $qpid_reconnect_limit = 0, - $qpid_reconnect_interval_min = 0, - $qpid_reconnect_interval_max = 0, - $qpid_reconnect_interval = 0, - $database_connection = 'sqlite:////var/lib/heat/heat.sqlite', - $database_idle_timeout = 3600, - $use_syslog = false, - $log_facility = 'LOG_USER', - $flavor = undef, - $region_name = undef, - $enable_stack_adopt = undef, - $enable_stack_abandon = undef, - $sync_db = true, + $auth_uri = false, + $identity_uri = false, + $package_ensure = 'present', + $verbose = false, + $debug = false, + $log_dir = '/var/log/heat', + $keystone_user = 'heat', + $keystone_tenant = 'services', + $keystone_password = false, + $keystone_ec2_uri = 'http://127.0.0.1:5000/v2.0/ec2tokens', + $rpc_backend = 'heat.openstack.common.rpc.impl_kombu', + $rabbit_host = '127.0.0.1', + $rabbit_port = 5672, + $rabbit_hosts = undef, + $rabbit_userid = 'guest', + $rabbit_password = '', + $rabbit_virtual_host = '/', + $rabbit_heartbeat_timeout_threshold = 0, + $rabbit_heartbeat_rate = 2, + $rabbit_use_ssl = false, + $kombu_ssl_ca_certs = undef, + $kombu_ssl_certfile = undef, + $kombu_ssl_keyfile = undef, + $kombu_ssl_version = 'TLSv1', + $amqp_durable_queues = false, + $qpid_hostname = 'localhost', + $qpid_port = 5672, + $qpid_username = 'guest', + $qpid_password = 'guest', + $qpid_heartbeat = 60, + $qpid_protocol = 'tcp', + $qpid_tcp_nodelay = true, + $qpid_reconnect = true, + $qpid_reconnect_timeout = 0, + $qpid_reconnect_limit = 0, + $qpid_reconnect_interval_min = 0, + $qpid_reconnect_interval_max = 0, + $qpid_reconnect_interval = 0, + $database_connection = 'sqlite:////var/lib/heat/heat.sqlite', + $database_idle_timeout = 3600, + $use_syslog = false, + $log_facility = 'LOG_USER', + $flavor = undef, + $region_name = undef, + $enable_stack_adopt = undef, + $enable_stack_abandon = undef, + $sync_db = true, # Deprecated parameters - $mysql_module = undef, - $sql_connection = undef, - $keystone_host = '127.0.0.1', - $keystone_port = '35357', - $keystone_protocol = 'http', - $instance_user = undef, + $mysql_module = undef, + $sql_connection = undef, + $keystone_host = '127.0.0.1', + $keystone_port = '35357', + $keystone_protocol = 'http', + $instance_user = undef, ) { include ::heat::params @@ -322,11 +345,13 @@ } heat_config { - 'oslo_messaging_rabbit/rabbit_userid' : value => $rabbit_userid; - 'oslo_messaging_rabbit/rabbit_password' : value => $rabbit_password, secret => true; - 'oslo_messaging_rabbit/rabbit_virtual_host' : value => $rabbit_virtual_host; - 'oslo_messaging_rabbit/rabbit_use_ssl' : value => $rabbit_use_ssl; - 'DEFAULT/amqp_durable_queues' : 
value => $amqp_durable_queues; + 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; + 'oslo_messaging_rabbit/rabbit_password': value => $rabbit_password, secret => true; + 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; + 'oslo_messaging_rabbit/heartbeat_timeout_threshold': value => $rabbit_heartbeat_timeout_threshold; + 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; + 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; + 'DEFAULT/amqp_durable_queues': value => $amqp_durable_queues; } if $rabbit_use_ssl { @@ -448,6 +473,7 @@ heat_config { 'DEFAULT/rpc_backend' : value => $rpc_backend; + 'DEFAULT/rpc_response_timeout' : value => $rpc_response_timeout; 'DEFAULT/debug' : value => $debug; 'DEFAULT/verbose' : value => $verbose; 'ec2authtoken/auth_uri' : value => $keystone_ec2_uri; @@ -552,7 +578,10 @@ } # instance_user - if $instance_user { + # special case for empty string since it's a valid value + if $instance_user == '' { + heat_config { 'DEFAULT/instance_user': value => ''; } + } elsif $instance_user { heat_config { 'DEFAULT/instance_user': value => $instance_user; } } else { heat_config { 'DEFAULT/instance_user': ensure => absent; } diff --git a/heat/manifests/keystone/domain.pp b/heat/manifests/keystone/domain.pp index 35f675ff8..19b26703c 100644 --- a/heat/manifests/keystone/domain.pp +++ b/heat/manifests/keystone/domain.pp @@ -1,12 +1,23 @@ # == Class: heat::keystone::domain # -# Configures heat domain in Keystone. -# -# Note: Implementation is done by heat-keystone-setup-domain script temporarily -# because currently puppet-keystone does not support v3 API +# Configures Heat domain in Keystone. # # === Parameters # +# [*domain_name*] +# Heat domain name. Defaults to 'heat'. +# +# [*domain_admin*] +# Keystone domain admin user which will be created. Defaults to 'heat_admin'. +# +# [*domain_admin_email*] +# Keystone domain admin user email address. Defaults to 'heat_admin@localhost'. + +# [*domain_password*] +# Keystone domain admin user password. Defaults to 'changeme'. +# +# === Deprecated Parameters +# # [*auth_url*] # Keystone auth url # @@ -19,57 +30,54 @@ # [*keystone_tenant*] # Keystone admin tenant name # -# [*domain_name*] -# Heat domain name. Defaults to 'heat'. -# -# [*domain_admin*] -# Keystone domain admin user which will be created. Defaults to 'heat_admin'. -# -# [*domain_password*] -# Keystone domain admin user password. Defaults to 'changeme'. 
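For the reworked domain class that follows, a minimal declaration sketch; the password is a placeholder and the usual puppet-keystone admin credentials are assumed to be available to the keystone providers:

```puppet
# Creates the 'heat' Keystone domain and a 'heat_admin' user with the admin
# role in it, then writes stack_domain_admin, stack_domain_admin_password
# and stack_user_domain_name into heat.conf. This replaces the old
# heat-keystone-setup-domain exec and the heat_domain_id_setter resource.
class { '::heat::keystone::domain':
  domain_name        => 'heat',
  domain_admin       => 'heat_admin',
  domain_admin_email => 'heat_admin@localhost',
  domain_password    => 'a_domain_password',
}
```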
-# class heat::keystone::domain ( - $auth_url = undef, - $keystone_admin = undef, - $keystone_password = undef, - $keystone_tenant = undef, - $domain_name = 'heat', - $domain_admin = 'heat_admin', - $domain_password = 'changeme', + $domain_name = 'heat', + $domain_admin = 'heat_admin', + $domain_admin_email = 'heat_admin@localhost', + $domain_password = 'changeme', + + # DEPRECATED PARAMETERS + $auth_url = undef, + $keystone_admin = undef, + $keystone_password = undef, + $keystone_tenant = undef, ) { include ::heat::params - $cmd_evn = [ - "OS_TENANT_NAME=${keystone_tenant}", - "OS_USERNAME=${keystone_admin}", - "OS_PASSWORD=${keystone_password}", - "OS_AUTH_URL=${auth_url}", - "HEAT_DOMAIN=${domain_name}", - "HEAT_DOMAIN_ADMIN=${domain_admin}", - "HEAT_DOMAIN_PASSWORD=${domain_password}" - ] - exec { 'heat_domain_create': - path => '/usr/bin', - command => 'heat-keystone-setup-domain', - environment => $cmd_evn, - require => Package['heat-common'], - logoutput => 'on_failure' + if $auth_url { + warning('The auth_url parameter is deprecated and will be removed in future releases') } - - heat_domain_id_setter { 'heat_domain_id': - ensure => present, - domain_name => $domain_name, - auth_url => $auth_url, - auth_username => $keystone_admin, - auth_password => $keystone_password, - auth_tenant_name => $keystone_tenant, - require => Exec['heat_domain_create'], + if $keystone_admin { + warning('The keystone_admin parameter is deprecated and will be removed in future releases') + } + if $keystone_password { + warning('The keystone_password parameter is deprecated and will be removed in future releases') } + if $keystone_tenant { + warning('The keystone_tenant parameter is deprecated and will be removed in future releases') + } + + ensure_resource('keystone_domain', 'heat_domain', { + 'ensure' => 'present', + 'enabled' => true, + 'name' => $domain_name + }) + ensure_resource('keystone_user', 'heat_domain_admin', { + 'ensure' => 'present', + 'enabled' => true, + 'name' => $domain_admin, + 'email' => $domain_admin_email, + 'password' => $domain_password, + 'domain' => $domain_name, + }) + ensure_resource('keystone_user_role', "${domain_admin}@::${domain_name}", { + 'roles' => ['admin'], + }) heat_config { - 'DEFAULT/stack_domain_admin': value => $domain_admin; + 'DEFAULT/stack_domain_admin': value => $domain_admin; 'DEFAULT/stack_domain_admin_password': value => $domain_password, secret => true; + 'DEFAULT/stack_user_domain_name': value => $domain_name; } - } diff --git a/heat/metadata.json b/heat/metadata.json index 7ad7a6cbb..9834175f6 100644 --- a/heat/metadata.json +++ b/heat/metadata.json @@ -1,6 +1,6 @@ { - "name": "stackforge-heat", - "version": "5.1.0", + "name": "openstack-heat", + "version": "6.0.0", "author": "eNovance and OpenStack Contributors", "summary": "Puppet module for OpenStack Heat", "license": "Apache-2.0", @@ -32,8 +32,8 @@ "description": "Installs and configures OpenStack Heat (Orchestration).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0 <6.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } ] } diff --git a/heat/spec/acceptance/basic_heat_spec.rb b/heat/spec/acceptance/basic_heat_spec.rb index 
1f78e4833..785af33fe 100644 --- a/heat/spec/acceptance/basic_heat_spec.rb +++ b/heat/spec/acceptance/basic_heat_spec.rb @@ -70,8 +70,9 @@ class { '::keystone': enabled => true, } class { '::keystone::roles::admin': - email => 'test@example.tld', - password => 'a_big_secret', + email => 'test@example.tld', + password => 'a_big_secret', + admin_roles => ['admin', '_member_', 'heat_stack_owner'] } class { '::keystone::endpoint': public_url => "https://${::fqdn}:5000/", @@ -93,6 +94,9 @@ class { '::heat::db::mysql': class { '::heat::keystone::auth': password => 'a_big_secret', } + class { '::heat::keystone::domain': + domain_password => 'oh_my_no_secret', + } class { '::heat::client': } class { '::heat::api': } class { '::heat::engine': diff --git a/heat/spec/classes/heat_init_spec.rb b/heat/spec/classes/heat_init_spec.rb index 83f330783..c1edc2904 100644 --- a/heat/spec/classes/heat_init_spec.rb +++ b/heat/spec/classes/heat_init_spec.rb @@ -55,6 +55,14 @@ end end + context 'with rabbit heartbeat configured' do + before { params.merge!( + :rabbit_heartbeat_timeout_threshold => '60', + :rabbit_heartbeat_rate => '10' ) } + it_configures 'a heat base installation' + it_configures 'rabbit with heartbeat configured' + end + context 'with qpid instance' do before {params.merge!(qpid_params) } @@ -189,11 +197,14 @@ is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') end it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_host').with_value( params[:rabbit_host] ) } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_port').with_value( params[:rabbit_port] ) } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_hosts').with_value( "#{params[:rabbit_host]}:#{params[:rabbit_port]}" ) } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_ha_queues').with_value('false') } + it { is_expected.to contain_heat_config('DEFAULT/rpc_response_timeout').with_value('60') } it { is_expected.to contain_heat_config('DEFAULT/amqp_durable_queues').with_value(false) } end @@ -208,6 +219,8 @@ is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') end it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_host').with_ensure('absent') } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_port').with_ensure('absent') } @@ -227,6 +240,8 @@ is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + 
is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('0') + is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('2') end it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_host').with_ensure('absent') } it { is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_port').with_ensure('absent') } @@ -235,6 +250,21 @@ it { is_expected.to contain_heat_config('DEFAULT/amqp_durable_queues').with_value(true) } end + shared_examples_for 'rabbit with heartbeat configured' do + it 'configures rabbit' do + is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_userid').with_value( params[:rabbit_userid] ) + is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_password').with_value( params[:rabbit_password] ) + is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_password').with_secret( true ) + is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_virtual_host').with_value( params[:rabbit_virtual_host] ) + is_expected.to contain_heat_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value(false) + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') + is_expected.to contain_heat_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') + end + it { is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value('60') } + it { is_expected.to contain_heat_config('oslo_messaging_rabbit/heartbeat_rate').with_value('10') } + end shared_examples_for 'qpid as rpc backend' do context("with default parameters") do @@ -247,6 +277,7 @@ it { is_expected.to contain_heat_config('DEFAULT/qpid_heartbeat').with_value('60') } it { is_expected.to contain_heat_config('DEFAULT/qpid_protocol').with_value('tcp') } it { is_expected.to contain_heat_config('DEFAULT/qpid_tcp_nodelay').with_value(true) } + it { is_expected.to contain_heat_config('DEFAULT/rpc_response_timeout').with_value('60') } it { is_expected.to contain_heat_config('DEFAULT/amqp_durable_queues').with_value(false) } end @@ -457,7 +488,7 @@ end end - shared_examples_for 'with instance_user set' do + shared_examples_for 'with instance_user set to a string' do before do params.merge!( :instance_user => "fred", @@ -469,6 +500,18 @@ end end + shared_examples_for 'with instance_user set to an empty string' do + before do + params.merge!( + :instance_user => "", + ) + end + + it 'has instance_user set to an empty string when specified' do + is_expected.to contain_heat_config('DEFAULT/instance_user').with_value('') + end + end + shared_examples_for 'without instance_user set' do it 'doesnt have instance_user set by default' do is_expected.to contain_heat_config('DEFAULT/instance_user').with_enure('absent') diff --git a/heat/spec/classes/heat_keystone_domain_spec.rb b/heat/spec/classes/heat_keystone_domain_spec.rb index 0eba85d82..c8d77911d 100644 --- a/heat/spec/classes/heat_keystone_domain_spec.rb +++ b/heat/spec/classes/heat_keystone_domain_spec.rb @@ -3,13 +3,10 @@ describe 'heat::keystone::domain' do let :params do { - :auth_url => 'http://127.0.0.1:35357/v2.0', - :keystone_admin => 'admin', - :keystone_password => 'admin_passwd', - :keystone_tenant => 'admin', - :domain_name => 'heat', - :domain_admin => 
'heat_admin', - :domain_password => 'domain_passwd' + :domain_name => 'heat', + :domain_admin => 'heat_admin', + :domain_admin_email => 'heat_admin@localhost', + :domain_password => 'domain_passwd' } end @@ -18,34 +15,26 @@ is_expected.to contain_heat_config('DEFAULT/stack_domain_admin').with_value(params[:domain_admin]) is_expected.to contain_heat_config('DEFAULT/stack_domain_admin_password').with_value(params[:domain_password]) is_expected.to contain_heat_config('DEFAULT/stack_domain_admin_password').with_secret(true) + is_expected.to contain_heat_config('DEFAULT/stack_user_domain_name').with_value(params[:domain_name]) end - it 'should configure heat domain id' do - is_expected.to contain_heat_domain_id_setter('heat_domain_id').with( - :ensure => 'present', - :domain_name => params[:domain_name], - :auth_url => params[:auth_url], - :auth_username => params[:keystone_admin], - :auth_password => params[:keystone_password], - :auth_tenant_name => params[:keystone_tenant] + it 'should create keystone domain' do + is_expected.to contain_keystone_domain('heat_domain').with( + :ensure => 'present', + :enabled => 'true', + :name => params[:domain_name] ) - end - it 'should exec helper script' do - is_expected.to contain_exec('heat_domain_create').with( - :command => 'heat-keystone-setup-domain', - :path => '/usr/bin', - :require => 'Package[heat-common]', - :logoutput => 'on_failure', - :environment => [ - "OS_TENANT_NAME=#{params[:keystone_tenant]}", - "OS_USERNAME=#{params[:keystone_admin]}", - "OS_PASSWORD=#{params[:keystone_password]}", - "OS_AUTH_URL=#{params[:auth_url]}", - "HEAT_DOMAIN=#{params[:domain_name]}", - "HEAT_DOMAIN_ADMIN=#{params[:domain_admin]}", - "HEAT_DOMAIN_PASSWORD=#{params[:domain_password]}" - ] + is_expected.to contain_keystone_user('heat_domain_admin').with( + :ensure => 'present', + :enabled => 'true', + :name => params[:domain_admin], + :email => params[:domain_admin_email], + :password => params[:domain_password], + :domain => params[:domain_name], + ) + is_expected.to contain_keystone_user_role('heat_admin@::heat').with( + :roles => ['admin'], ) end end diff --git a/heat/spec/spec_helper_acceptance.rb b/heat/spec/spec_helper_acceptance.rb index 429e807c4..144b31e3f 100644 --- a/heat/spec/spec_helper_acceptance.rb +++ b/heat/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/heat/spec/unit/provider/heat_domain_id_setter/heat_spec.rb b/heat/spec/unit/provider/heat_domain_id_setter/heat_spec.rb deleted file mode 100644 index a6bc4d9c0..000000000 --- a/heat/spec/unit/provider/heat_domain_id_setter/heat_spec.rb +++ /dev/null @@ -1,177 +0,0 @@ -require 'spec_helper' -require 'puppet' -require 'puppet/type/heat_domain_id_setter' - -provider_class = Puppet::Type.type(:heat_domain_id_setter).provider(:ruby) - -# used to simulate an authentication response from Keystone -# (POST v2.0/tokens) -auth_response = { - 'access' => { - 'token' => { - 'id' => 'TOKEN', - } - } -} - -# used to simulate a response to GET v3/domains -domains_response = { - 'domains' => [ - { - 'name' => 'heat', - 'id' => 'UUID_HEAT' - }, - { - 'name' => 'multiple_matches_domain', - 'id' => 'UUID1' - }, - { - 'name' => 'multiple_matches_domain', - 'id' => 
'UUID2' - }, - ] -} - -# Stub for ini_setting resource -Puppet::Type.newtype(:ini_setting) do -end - -# Stub for ini_setting provider -Puppet::Type.newtype(:ini_setting).provide(:ruby) do - def create - end -end - -describe 'Puppet::Type.type(:heat_keystone_domain_id_setter)' do - let :params do - { - :name => 'heat_domain_id', - :ensure => 'present', - :domain_name => 'heat', - :auth_url => 'http://127.0.0.1:35357/v2.0', - :auth_username => 'admin', - :auth_password => 'admin_passwd', - :auth_tenant_name => 'admin', - } - end - - it 'should have a non-nil provider' do - expect(provider_class).not_to be_nil - end - - context 'when url is correct' do - before :each do - stub_request(:post, "http://127.0.0.1:35357/v2.0/tokens"). - to_return(:status => 200, - :body => auth_response.to_json, - :headers => {}) - stub_request(:get, "http://127.0.0.1:35357/v3/domains"). - with(:headers => {'X-Auth-Token'=>'TOKEN'}). - to_return(:status => 200, - :body => domains_response.to_json, - :headers => {}) - end - - it 'should create a resource' do - resource = Puppet::Type::Heat_domain_id_setter.new(params) - provider = provider_class.new(resource) - expect(provider.exists?).to be_falsey - expect(provider.create).to be_nil - end - end - - # What happens if we ask for a domain that does not exist? - context 'when domain cannot be found' do - before :each do - stub_request(:post, "http://127.0.0.1:35357/v2.0/tokens"). - to_return(:status => 200, - :body => auth_response.to_json, - :headers => {}) - stub_request(:get, "http://127.0.0.1:35357/v3/domains"). - with(:headers => {'X-Auth-Token'=>'TOKEN'}). - to_return(:status => 200, - :body => domains_response.to_json, - :headers => {}) - - params.merge!(:domain_name => 'bad_domain_name') - end - - it 'should receive an api error' do - resource = Puppet::Type::Heat_domain_id_setter.new(params) - provider = provider_class.new(resource) - expect(provider.exists?).to be_falsey - expect { provider.create }.to raise_error KeystoneAPIError, /Unable to find matching domain/ - end - end - - # What happens if we ask for a domain name that results in multiple - # matches? - context 'when there are multiple matching domains' do - before :each do - stub_request(:post, "http://127.0.0.1:35357/v2.0/tokens"). - to_return(:status => 200, - :body => auth_response.to_json, - :headers => {}) - stub_request(:get, "http://127.0.0.1:35357/v3/domains"). - with(:headers => {'X-Auth-Token'=>'TOKEN'}). - to_return(:status => 200, - :body => domains_response.to_json, - :headers => {}) - - params.merge!(:domain_name => 'multiple_matches_domain') - end - - it 'should receive an api error' do - resource = Puppet::Type::Heat_domain_id_setter.new(params) - provider = provider_class.new(resource) - expect(provider.exists?).to be_falsey - expect { provider.create }.to raise_error KeystoneAPIError, /Found multiple matches for domain name/ - end - end - - # What happens if we pass a bad password? - context 'when password is incorrect' do - before :each do - stub_request(:post, "http://127.0.0.1:35357/v2.0/tokens"). - to_return(:status => 401, - :body => auth_response.to_json, - :headers => {}) - end - - it 'should receive an authentication error' do - resource = Puppet::Type::Heat_domain_id_setter.new(params) - provider = provider_class.new(resource) - expect(provider.exists?).to be_falsey - expect { provider.create }.to raise_error KeystoneAPIError - end - end - - # What happens if the server is not listening? 
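# A hedged usage sketch for the heat changes above (values borrowed from the
# specs): ::heat now exposes the RabbitMQ heartbeat options, and
# heat::keystone::domain manages the stack user domain directly now that the
# heat_domain_id_setter provider is removed. Other ::heat parameters are
# assumed to keep their defaults.
class { '::heat':
  rabbit_heartbeat_timeout_threshold => '60',
  rabbit_heartbeat_rate              => '10',
}
class { '::heat::keystone::domain':
  domain_name        => 'heat',
  domain_admin       => 'heat_admin',
  domain_admin_email => 'heat_admin@localhost',
  domain_password    => 'oh_my_no_secret',
}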
- context 'when keystone server is unavailable' do - before :each do - stub_request(:post, "http://127.0.0.1:35357/v2.0/tokens").to_raise Errno::ECONNREFUSED - end - - it 'should receive a connection error' do - resource = Puppet::Type::Heat_domain_id_setter.new(params) - provider = provider_class.new(resource) - expect(provider.exists?).to be_falsey - expect { provider.create }.to raise_error KeystoneConnectionError - end - end - - # What happens if we mistype the hostname? - context 'when keystone server is unknown' do - before :each do - stub_request(:post, "http://127.0.0.1:35357/v2.0/tokens").to_raise SocketError, 'getaddrinfo: Name or service not known' - end - - it 'should receive a connection error' do - resource = Puppet::Type::Heat_domain_id_setter.new(params) - provider = provider_class.new(resource) - expect(provider.exists?).to be_falsey - expect { provider.create }.to raise_error KeystoneConnectionError - end - end - -end diff --git a/horizon/CHANGELOG.md b/horizon/CHANGELOG.md index df1c6c51b..196a2328a 100644 --- a/horizon/CHANGELOG.md +++ b/horizon/CHANGELOG.md @@ -1,3 +1,24 @@ +##2015-07-08 - 6.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Kilo. + +####Backwards-incompatible changes +- Remove some old deprecated parameters + +####Features +- Puppet 4.x support +- Sort policy files in local_settings.py +- Add support for Neutron DVR and L3 HA options +- Collect static files before compressing them +- Add support to add Tuskar-ui config to local_settings +- Add support for WEBROOT in local_settings +- Add 'log_handler' parameter + +####Maintenance +- Acceptance tests with Beaker +- Fix spec tests for RSpec 3.x and Puppet 4.x + ##2015-06-17 - 5.1.0 ###Summary diff --git a/horizon/README.md b/horizon/README.md index 0f775e353..caefbcbfa 100644 --- a/horizon/README.md +++ b/horizon/README.md @@ -1,7 +1,7 @@ horizon ======= -5.1.0 - 2014.2 - Juno +6.0.0 - 2015.1 - Kilo #### Table of Contents diff --git a/horizon/manifests/init.pp b/horizon/manifests/init.pp index d64b507ba..3e6f5aa25 100644 --- a/horizon/manifests/init.pp +++ b/horizon/manifests/init.pp @@ -228,7 +228,7 @@ $api_result_limit = 1000, $log_handler = 'file', $log_level = 'INFO', - $help_url = 'http://docs.openstack.org', + $help_url = 'https://www.rdoproject.org/Docs', $local_settings_template = 'horizon/local_settings.py.erb', $configure_apache = true, $bind_address = undef, diff --git a/horizon/metadata.json b/horizon/metadata.json index 1c580c18a..5dddaea48 100644 --- a/horizon/metadata.json +++ b/horizon/metadata.json @@ -1,6 +1,6 @@ { - "name": "stackforge-horizon", - "version": "5.1.0", + "name": "openstack-horizon", + "version": "6.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Horizon", "license": "Apache-2.0", diff --git a/horizon/spec/spec_helper_acceptance.rb b/horizon/spec/spec_helper_acceptance.rb index 429e807c4..144b31e3f 100644 --- a/horizon/spec/spec_helper_acceptance.rb +++ b/horizon/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/ironic/CHANGELOG.md b/ironic/CHANGELOG.md new file mode 100644 index 000000000..6a0701c08 --- /dev/null +++ b/ironic/CHANGELOG.md @@ 
-0,0 +1,4 @@ +##2015-07-08 - 6.0.0 +###Summary + +- Initial release of the puppet-ironic module diff --git a/ironic/README.md b/ironic/README.md index 289f0154c..15198183d 100644 --- a/ironic/README.md +++ b/ironic/README.md @@ -1,6 +1,8 @@ puppet-ironic ============= +6.0.0 - 2015.1 - Kilo + #### Table of Contents 1. [Overview - What is the ironic module?](#overview) diff --git a/ironic/manifests/conductor.pp b/ironic/manifests/conductor.pp index 76bacbefe..c52c480e3 100644 --- a/ironic/manifests/conductor.pp +++ b/ironic/manifests/conductor.pp @@ -32,10 +32,17 @@ # Should be an interger value # Defaults to '120'. # +# [*force_power_state_during_sync*] +# (optional) Should the hardware power state be set to the state recorded in +# the database (True) or should the database be updated based on the hardware +# state (False). +# Defaults to true. +# class ironic::conductor ( - $package_ensure = 'present', - $enabled = true, - $max_time_interval = '120' + $package_ensure = 'present', + $enabled = true, + $max_time_interval = '120', + $force_power_state_during_sync = true, ) { include ::ironic::params @@ -45,6 +52,7 @@ # Configure ironic.conf ironic_config { 'conductor/max_time_interval': value => $max_time_interval; + 'conductor/force_power_state_during_sync': value => $force_power_state_during_sync; } # Install package diff --git a/ironic/metadata.json b/ironic/metadata.json index d86bbfd56..61dc596dd 100644 --- a/ironic/metadata.json +++ b/ironic/metadata.json @@ -1,10 +1,10 @@ { - "name": "stackforge-ironic", - "version": "5.0.0", - "author": "eNovance and StackForge Contributors", + "name": "openstack-ironic", + "version": "6.0.0", + "author": "eNovance and OpenStack Contributors", "summary": "Puppet module for OpenStack Ironic", "license": "Apache-2.0", - "source": "git://github.com/stackforge/puppet-ironic.git", + "source": "git://github.com/openstack/puppet-ironic.git", "project_page": "https://launchpad.net/puppet-ironic", "issues_url": "https://bugs.launchpad.net/puppet-ironic", "requirements": [ @@ -32,8 +32,8 @@ "description": "Installs and configures OpenStack Ironic (Bare metal).", "dependencies": [ { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0 <6.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } ] } diff --git a/ironic/spec/classes/ironic_conductor_spec.rb b/ironic/spec/classes/ironic_conductor_spec.rb index 105a4c885..8d0d1df08 100644 --- a/ironic/spec/classes/ironic_conductor_spec.rb +++ b/ironic/spec/classes/ironic_conductor_spec.rb @@ -23,9 +23,10 @@ describe 'ironic::conductor' do let :default_params do - { :package_ensure => 'present', - :enabled => true, - :max_time_interval => '120' } + { :package_ensure => 'present', + :enabled => true, + :max_time_interval => '120', + :force_power_state_during_sync => true } end let :params do @@ -57,14 +58,19 @@ it 'configures ironic.conf' do is_expected.to contain_ironic_config('conductor/max_time_interval').with_value(p[:max_time_interval]) + is_expected.to contain_ironic_config('conductor/force_power_state_during_sync').with_value(p[:force_power_state_during_sync]) end context 'when overriding parameters' do before :each do - params.merge!(:max_time_interval => '50') + 
params.merge!( + :max_time_interval => '50', + :force_power_state_during_sync => false + ) end it 'should replace default parameter with new value' do is_expected.to contain_ironic_config('conductor/max_time_interval').with_value(p[:max_time_interval]) + is_expected.to contain_ironic_config('conductor/force_power_state_during_sync').with_value(p[:force_power_state_during_sync]) end end diff --git a/ironic/spec/spec_helper_acceptance.rb b/ironic/spec/spec_helper_acceptance.rb index 429e807c4..144b31e3f 100644 --- a/ironic/spec/spec_helper_acceptance.rb +++ b/ironic/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/keystone/CHANGELOG.md b/keystone/CHANGELOG.md index 973da8fb1..33c0f5e06 100644 --- a/keystone/CHANGELOG.md +++ b/keystone/CHANGELOG.md @@ -1,3 +1,43 @@ +##2015-07-08 - 6.0.0 +###Summary + +This is a backwards-incompatible major release for OpenStack Kilo. + +####Backwards-incompatible changes +- Remove deprecated parameters +- MySQL: change default MySQL collate to utf8_general_ci +- Move openstackclient to openstacklib + +####Features +- Puppet 4.x support +- Support Keystone v3 API +- Allow disabling or delaying the token_flush cron +- Migrate postgresql backend to use openstacklib::db::postgresql +- Add max_token_size optional parameter +- Add admin_workers and public_workers configuration options +- Add support for LDAP connection pools +- Add a package ensure for openstackclient +- Enable setting the revoke/token driver +- Add manage_service feature +- Makes distinct use of url vs auth_url +- Create a sync_db boolean for Keystone +- LDAP: add support to configure credential driver +- Support notification_format +- Allow custom file source for wsgi scripts +- Decouple sync_db from enabled +- Add support for Fernet Tokens + +####Bugfixes +- Crontab: ensure the script is run with bash shell +- Copy latest keystone.py from Keystone upstream +- Fix deprecated LDAP config options +- Fix service keystone conflict when running in apache + +####Maintenance +- Acceptance tests with Beaker +- Fix spec tests for RSpec 3.x and Puppet 4.x +- Restructures authentication for resource providers + ##2015-06-17 - 5.1.0 ###Summary diff --git a/keystone/README.md b/keystone/README.md index df6023740..e35380cc9 100644 --- a/keystone/README.md +++ b/keystone/README.md @@ -1,7 +1,7 @@ keystone ======= -5.1.0 - 2014.2 - Juno +6.0.0 - 2015.1 - Kilo #### Table of Contents diff --git a/keystone/examples/v3_basic.pp b/keystone/examples/v3_basic.pp new file mode 100644 index 000000000..5777d2b5b --- /dev/null +++ b/keystone/examples/v3_basic.pp @@ -0,0 +1,47 @@ +# Example using v3 domains. The admin user is created in the domain +# named 'admin_domain', and assigned the role 'admin' in the 'admin' +# project in the domain 'admin_domain'. The keystone service account is +# created in default domain, and assigned the +# role 'admin' in the project 'services' in the default domain. +# NOTE: Until all of the other services support using Keystone v3 +# with keystone_authtoken middleware that supports v3, they cannot +# specify a domain for authentication, and so have to be in the +# default domain. 
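# A hedged sketch for the ironic::conductor hunk earlier in this patch: the new
# force_power_state_during_sync parameter chooses whether the recorded power
# state is pushed to the hardware (true, the default) or the database is
# updated from the hardware state (false). Values mirror the spec override above.
class { '::ironic::conductor':
  max_time_interval             => '50',
  force_power_state_during_sync => false,
}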
+# +# To be sure everything is working, run: +# $ export OS_IDENTITY_API_VERSION=3 +# $ export OS_USERNAME=admin +# $ export OS_USER_DOMAIN_NAME=admin_domain +# $ export OS_PASSWORD=ChangeMe +# $ export OS_PROJECT_NAME=admin +# $ export OS_PROJECT_DOMAIN_NAME=admin_domain +# $ export OS_AUTH_URL=http://keystone.local:35357/v3 +# $ openstack user list +# + +Exec { logoutput => 'on_failure' } + + +class { '::mysql::server': } +class { '::keystone::db::mysql': + password => 'keystone', +} +class { '::keystone': + verbose => true, + debug => true, + database_connection => 'mysql://keystone:keystone@127.0.0.1/keystone', + admin_token => 'admin_token', + enabled => true, +} +class { '::keystone::roles::admin': + email => 'test@example.tld', + password => 'a_big_secret', + admin => 'admin', # username + admin_tenant => 'admin', # project name + admin_user_domain => 'admin', # domain for user + admin_tenant_domain => 'admin', # domain for project +} +class { '::keystone::endpoint': + public_url => 'http://127.0.0.1:5000/', + admin_url => 'http://127.0.0.1:35357/', +} diff --git a/keystone/ext/keystone_test_v3.rb b/keystone/ext/keystone_test_v3.rb new file mode 100644 index 000000000..0d7550e32 --- /dev/null +++ b/keystone/ext/keystone_test_v3.rb @@ -0,0 +1,64 @@ +#!/usr/bin/env ruby +# this script verifies that keystone has +# been successfully installed using the instructions +# found here: http://keystone.openstack.org/configuration.html +# and can use the v3 api http://developer.openstack.org/api-ref-identity-v3.html + +begin + require 'rubygems' +rescue + puts 'Could not require rubygems. This assumes puppet is not installed as a gem' +end +require 'open3' +require 'fileutils' +require 'puppet' +require 'pp' + +username='admin' +password='a_big_secret' +# required to get a real services catalog +project='openstack' +user_domain='admin' +project_domain='admin' + +# shared secret +service_token='admin_token' + +def run_command(cmd) + Open3.popen3(cmd) do |stdin, stdout, stderr| + begin + stdout = stdout.read + puts "Response from token request:#{stdout}" + return stdout + rescue Exception => e + puts "Request failed, this sh*t is borked :( : details: #{e}" + exit 1 + end + end +end + +puts `puppet apply -e "package {curl: ensure => present }"` +get_token = %(curl -D - -d '{"auth":{"identity":{"methods":["password"],"password":{"user":{"domain":{"name":"#{user_domain}"},"name":"#{username}","password": "#{password}"}}},"scope":{"project":{"domain":{"name":"#{project_domain}"},"name": "#{project}"}}}}' -H "Content-type: application/json" http://localhost:35357/v3/auth/tokens) +token = nil + +puts "Running auth command: #{get_token}" +rawoutput = run_command(get_token) +if rawoutput =~ /X-Subject-Token: ([\w]+)/ + token = $1 +else + puts "No token in output! 
#{rawoutput}" + exit 1 +end + +if token + puts "We were able to retrieve a token" + puts token + verify_token = "curl -H 'X-Auth-Token: #{service_token}' 'X-Subject-Token: #{token}' http://localhost:35357/v3/auth/tokens" + puts 'verifying token' + run_command(verify_token) + ['endpoints', 'projects', 'users'].each do |x| + puts "getting #{x}" + get_keystone_data = "curl -H 'X-Auth-Token: #{token}' http://localhost:35357/v3/#{x}" + pp PSON.load(run_command(get_keystone_data)) + end +end diff --git a/keystone/lib/puppet/provider/keystone.rb b/keystone/lib/puppet/provider/keystone.rb index 849868bcb..64f46de50 100644 --- a/keystone/lib/puppet/provider/keystone.rb +++ b/keystone/lib/puppet/provider/keystone.rb @@ -10,25 +10,120 @@ class Puppet::Provider::Keystone < Puppet::Provider::Openstack INI_FILENAME = '/etc/keystone/keystone.conf' - def self.get_endpoint + @@default_domain_id = nil + + def self.admin_endpoint + @admin_endpoint ||= get_admin_endpoint + end + + def self.admin_token + @admin_token ||= get_admin_token + end + + def self.clean_host(host) + host ||= '127.0.0.1' + case host + when '0.0.0.0' + return '127.0.0.1' + when '::0' + return '[::1]' + else + return host + end + end + + def self.default_domain + domain_name_from_id(default_domain_id) + end + + def self.default_domain_id + if @@default_domain_id + @@default_domain_id + elsif keystone_file and keystone_file['identity'] and keystone_file['identity']['default_domain_id'] + keystone_file['identity']['default_domain_id'].strip + else + 'default' + end + end + + def self.default_domain_id=(id) + @@default_domain_id = id + end + + def self.domain_name_from_id(id) + unless @domain_hash + list = request('domain', 'list') + @domain_hash = Hash[list.collect{|domain| [domain[:id], domain[:name]]}] + end + unless @domain_hash.include?(id) + name = request('domain', 'show', id)[:name] + @domain_hash[id] = name if name + end + unless @domain_hash.include?(id) + err("Could not find domain with id [#{id}]") + end + @domain_hash[id] + end + + def self.get_admin_endpoint endpoint = nil + if keystone_file + if url = get_section('DEFAULT', 'admin_endpoint') + endpoint = url.chomp('/') + else + admin_port = get_section('DEFAULT', 'admin_port') || '35357' + host = clean_host(get_section('DEFAULT', 'admin_bind_host')) + protocol = ssl? ? 'https' : 'http' + endpoint = "#{protocol}://#{host}:#{admin_port}" + end + end + return endpoint + end + + def self.get_admin_token + get_section('DEFAULT', 'admin_token') + end + + def self.get_auth_url + auth_url = nil if ENV['OS_AUTH_URL'] - endpoint = ENV['OS_AUTH_URL'] + auth_url = ENV['OS_AUTH_URL'].dup + elsif auth_url = get_os_vars_from_rcfile(rc_filename)['OS_AUTH_URL'] else - endpoint = get_os_vars_from_rcfile(rc_filename)['OS_AUTH_URL'] - unless endpoint - # This is from legacy but seems wrong, we want auth_url not url! 
- endpoint = get_admin_endpoint - end + auth_url = admin_endpoint end - unless endpoint - raise(Puppet::Error::OpenstackAuthInputError, 'Could not find auth url to check user password.') + return auth_url + end + + def self.get_section(group, name) + if keystone_file && keystone_file[group] && keystone_file[group][name] + return keystone_file[group][name].strip end - endpoint + return nil end - def self.admin_endpoint - @admin_endpoint ||= get_admin_endpoint + def self.get_service_url + service_url = nil + if ENV['OS_URL'] + service_url = ENV['OS_URL'].dup + elsif admin_endpoint + service_url = admin_endpoint + service_url << "/v#{@credentials.version}" + end + return service_url + end + + def self.ini_filename + INI_FILENAME + end + + def self.keystone_file + return @keystone_file if @keystone_file + if File.exists?(ini_filename) + @keystone_file = Puppet::Util::IniConfig::File.new + @keystone_file.read(ini_filename) + @keystone_file + end end # use the domain in this order: @@ -56,54 +151,6 @@ def self.name_and_domain(namedomstr, domain_from_resource=nil, default_domain_na ret end - def self.admin_token - @admin_token ||= get_admin_token - end - - def self.get_admin_token - if keystone_file and keystone_file['DEFAULT'] and keystone_file['DEFAULT']['admin_token'] - return "#{keystone_file['DEFAULT']['admin_token'].strip}" - else - return nil - end - end - - def self.get_admin_endpoint - if keystone_file - if keystone_file['DEFAULT'] - if keystone_file['DEFAULT']['admin_endpoint'] - auth_url = keystone_file['DEFAULT']['admin_endpoint'].strip.chomp('/') - return "#{auth_url}/v#{@credentials.version}/" - end - - if keystone_file['DEFAULT']['admin_port'] - admin_port = keystone_file['DEFAULT']['admin_port'].strip - else - admin_port = '35357' - end - - if keystone_file['DEFAULT']['admin_bind_host'] - host = keystone_file['DEFAULT']['admin_bind_host'].strip - if host == "0.0.0.0" - host = "127.0.0.1" - elsif host == '::0' - host = '[::1]' - end - else - host = "127.0.0.1" - end - end - - if keystone_file['ssl'] && keystone_file['ssl']['enable'] && keystone_file['ssl']['enable'].strip.downcase == 'true' - protocol = 'https' - else - protocol = 'http' - end - end - - "#{protocol}://#{host}:#{admin_port}/v#{@credentials.version}/" - end - def self.request(service, action, properties=nil) super rescue Puppet::Error::OpenstackAuthInputError => error @@ -112,48 +159,21 @@ def self.request(service, action, properties=nil) def self.request_by_service_token(service, action, error, properties=nil) properties ||= [] - @credentials.token = get_admin_token - @credentials.url = get_admin_endpoint + @credentials.token = admin_token + @credentials.url = service_url raise error unless @credentials.service_token_set? 
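# A hedged illustration of the keystone.conf fallbacks used by the provider
# code above: when OS_AUTH_URL / OS_URL are not exported, get_admin_endpoint,
# get_admin_token and ssl? read these keys (values are examples only;
# clean_host maps 0.0.0.0 to 127.0.0.1).
keystone_config {
  'DEFAULT/admin_token':     value => 'admin_token', secret => true;
  'DEFAULT/admin_bind_host': value => '0.0.0.0';
  'DEFAULT/admin_port':      value => '35357';
}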
Puppet::Provider::Openstack.request(service, action, properties, @credentials) end - def self.ini_filename - INI_FILENAME - end - - def self.default_domain - domain_hash[default_domain_id] - end - - def self.domain_hash - return @domain_hash if @domain_hash - list = request('domain', 'list') - @domain_hash = Hash[list.collect{|domain| [domain[:id], domain[:name]]}] - @domain_hash - end - - def self.domain_name_from_id(id) - domain_hash[id] - end - - def self.default_domain_id - return @default_domain_id if @default_domain_id - if keystone_file and keystone_file['identity'] and keystone_file['identity']['default_domain_id'] - @default_domain_id = "#{keystone_file['identity']['default_domain_id'].strip}" - else - @default_domain_id = 'default' - end - @default_domain_id + def self.service_url + @service_url ||= get_service_url end - def self.keystone_file - return @keystone_file if @keystone_file - if File.exists?(ini_filename) - @keystone_file = Puppet::Util::IniConfig::File.new - @keystone_file.read(ini_filename) - @keystone_file + def self.ssl? + if keystone_file && keystone_file['ssl'] && keystone_file['ssl']['enable'] && keystone_file['ssl']['enable'].strip.downcase == 'true' + return true end + return false end # Helper functions to use on the pre-validated enabled field diff --git a/keystone/lib/puppet/provider/keystone_domain/openstack.rb b/keystone/lib/puppet/provider/keystone_domain/openstack.rb index 14a8f6982..134b46d08 100644 --- a/keystone/lib/puppet/provider/keystone_domain/openstack.rb +++ b/keystone/lib/puppet/provider/keystone_domain/openstack.rb @@ -99,6 +99,7 @@ def ensure_default_domain(create, destroy=false, value=nil) end if changed self.class.keystone_file.store + self.class.default_domain_id = newid debug("The default_domain_id was changed from #{curid} to #{newid}") end end diff --git a/keystone/lib/puppet/provider/keystone_tenant/openstack.rb b/keystone/lib/puppet/provider/keystone_tenant/openstack.rb index 57a299fd6..ac1dfb107 100644 --- a/keystone/lib/puppet/provider/keystone_tenant/openstack.rb +++ b/keystone/lib/puppet/provider/keystone_tenant/openstack.rb @@ -32,6 +32,7 @@ def create properties << project_domain end @property_hash = self.class.request('project', 'create', properties) + @property_hash[:name] = resource[:name] @property_hash[:ensure] = :present end diff --git a/keystone/lib/puppet/provider/keystone_user/openstack.rb b/keystone/lib/puppet/provider/keystone_user/openstack.rb index 98a34cd3c..e6f2486dc 100644 --- a/keystone/lib/puppet/provider/keystone_user/openstack.rb +++ b/keystone/lib/puppet/provider/keystone_user/openstack.rb @@ -7,7 +7,7 @@ desc "Provider to manage keystone users." 
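# A hedged sketch for the keystone_domain provider hunk above: a domain is now
# an ordinary resource, and ensure_default_domain keeps keystone.conf's
# identity/default_domain_id in step when the default domain changes.
keystone_domain { 'heat':
  ensure  => present,
  enabled => true,
}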
- @credentials = Puppet::Provider::Openstack::CredentialsV2_0.new + @credentials = Puppet::Provider::Openstack::CredentialsV3.new def initialize(value={}) super(value) @@ -15,7 +15,9 @@ def initialize(value={}) end def create - properties = [resource[:name]] + # see if resource[:domain], or user specified as user::domain + user_name, user_domain = self.class.name_and_domain(resource[:name], resource[:domain]) + properties = [user_name] if resource[:enabled] == :true properties << '--enable' elsif resource[:enabled] == :false @@ -24,18 +26,28 @@ def create if resource[:password] properties << '--password' << resource[:password] end - if resource[:tenant] - properties << '--project' << resource[:tenant] - end if resource[:email] properties << '--email' << resource[:email] end - self.class.request('user', 'create', properties) + if user_domain + properties << '--domain' + properties << user_domain + end + @property_hash = self.class.request('user', 'create', properties) + @property_hash[:name] = resource[:name] + @property_hash[:domain] = user_domain + if resource[:tenant] + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 + project_id = Puppet::Resource.indirection.find("Keystone_tenant/#{resource[:tenant]}")[:id] + set_project(resource[:tenant], project_id) + @property_hash[:tenant] = resource[:tenant] + end @property_hash[:ensure] = :present end def destroy - self.class.request('user', 'delete', @property_hash[:id]) + self.class.request('user', 'delete', id) @property_hash.clear end @@ -82,27 +94,44 @@ def id end def password - res = nil - return res if resource[:password] == nil + passwd = nil + return passwd if resource[:password] == nil if resource[:enabled] == :false || resource[:replace_password] == :false # Unchanged password - res = resource[:password] + passwd = resource[:password] else # Password validation - credentials = Puppet::Provider::Openstack::CredentialsV2_0.new - credentials.auth_url = self.class.get_endpoint - credentials.password = resource[:password] - credentials.project_name = resource[:tenant] - credentials.username = resource[:name] + credentials = Puppet::Provider::Openstack::CredentialsV3.new + unless auth_url = self.class.get_auth_url + raise(Puppet::Error::OpenstackAuthInputError, "Could not find authentication url to validate user's password.") + end + auth_url << "/v#{credentials.version}" unless auth_url =~ /\/v\d(\.\d)?$/ + credentials.auth_url = auth_url + credentials.password = resource[:password] + credentials.user_id = id + + # NOTE: The only reason we use username is so that the openstack provider + # will know we are doing v3password auth - otherwise, it is not used. The + # user_id uniquely identifies the user including domain. + credentials.username, unused = self.class.name_and_domain(resource[:name], domain) + # Need to specify a project id to get a project scoped token. List + # all of the projects for the user, and use the id from the first one. + projects = self.class.request('project', 'list', ['--user', id, '--long']) + if projects && projects[0] && projects[0][:id] + credentials.project_id = projects[0][:id] + else + # last chance - try a domain scoped token + credentials.domain_name = domain + end begin token = Puppet::Provider::Openstack.request('token', 'issue', ['--format', 'value'], credentials) rescue Puppet::Error::OpenstackUnauthorizedError # password is invalid else - res = resource[:password] unless token.empty? + passwd = resource[:password] unless token.empty? 
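# A hedged sketch of the v3-aware keystone_user behaviour implemented above:
# the domain may come from a 'name::domain' title or the domain property, and
# the tenant property is deprecated in favour of keystone_user_role. Names are
# illustrative, reusing the heat domain from earlier in this patch.
keystone_user { 'heat_admin::heat':
  ensure   => present,
  enabled  => true,
  email    => 'heat_admin@localhost',
  password => 'a_big_secret',
}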
end end - return res + return passwd end def password=(value) @@ -117,6 +146,51 @@ def replace_password=(value) @property_flush[:replace_password] = value end + def find_project_for_user(projname, project_id = nil) + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 + user_name, user_domain = self.class.name_and_domain(resource[:name], resource[:domain]) + project_name, project_domain = self.class.name_and_domain(projname, nil, user_domain) + self.class.request('project', 'list', ['--user', id, '--long']).each do |project| + if (project_id == project[:id]) || + ((projname == project_name) && (project_domain == self.class.domain_name_from_id(project[:domain_id]))) + return projname + end + end + return nil + end + + def set_project(newproject, project_id = nil) + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 + unless project_id + project_id = Puppet::Resource.indirection.find("Keystone_tenant/#{newproject}")[:id] + end + # Currently the only way to assign a user to a tenant not using user-create + # is to use role-add - this means we also need a role - there is usual + # a default role called _member_ which can be used for this purpose. What + # usually happens in a puppet module is that immediately after calling + # keystone_user, the module will then assign a role to that user. It is + # ok for a user to have the _member_ role and another role. + default_role = "_member_" + begin + self.class.request('role', 'show', default_role) + rescue + self.class.request('role', 'create', default_role) + end + # finally, assign the user to the project with the role + self.class.request('role', 'add', [default_role, '--project', project_id, '--user', id]) + newproject + end + + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 + def tenant=(value) + @property_hash[:tenant] = set_project(value) + end + + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 def tenant return resource[:tenant] if sym_to_bool(resource[:ignore_default_tenant]) # use the one returned from instances @@ -130,40 +204,52 @@ def tenant if tenant_name.nil? or tenant_name.empty? return nil # nothing found, nothing given end - # If the user list command doesn't report the project, it might still be there - # We don't need to know exactly what it is, we just need to know whether it's - # the one we're trying to set. - roles = self.class.request('user role', 'list', [resource[:name], '--project', tenant_name]) - if roles.empty? 
- return nil - else - return tenant_name - end + project_id = Puppet::Resource.indirection.find("Keystone_tenant/#{tenant_name}")[:id] + find_project_for_user(tenant_name, project_id) end - def tenant=(value) - self.class.request('user', 'set', [resource[:name], '--project', value]) - rescue Puppet::ExecutionFailure => e - if e.message =~ /You are not authorized to perform the requested action: LDAP user update/ - # read-only LDAP identity backend - just fall through - else - raise e - end - # note: read-write ldap will silently fail, not raise an exception - else - @property_hash[:tenant] = self.class.set_project(value, resource[:name]) + def domain + @property_hash[:domain] + end + + def domain_id + @property_hash[:domain_id] end def self.instances - list = request('user', 'list', '--long') - list.collect do |user| + instance_hash = {} + request('user', 'list', ['--long']).each do |user| + # The field says "domain" but it is really the domain_id + domname = domain_name_from_id(user[:domain]) + if instance_hash.include?(user[:name]) # not unique + curdomid = instance_hash[user[:name]][:domain] + if curdomid != default_domain_id + # Move the user from the short name slot to the long name slot + # because it is not in the default domain. + curdomname = domain_name_from_id(curdomid) + instance_hash["#{user[:name]}::#{curdomname}"] = instance_hash[user[:name]] + # Use the short name slot for the new user + instance_hash[user[:name]] = user + else + # Use the long name for the new user + instance_hash["#{user[:name]}::#{domname}"] = user + end + else + # Unique (for now) - store in short name slot + instance_hash[user[:name]] = user + end + end + instance_hash.keys.collect do |user_name| + user = instance_hash[user_name] new( - :name => user[:name], + :name => user_name, :ensure => :present, :enabled => user[:enabled].downcase.chomp == 'true' ? true : false, :password => user[:password], - :project => user[:project], :email => user[:email], + :description => user[:description], + :domain => domain_name_from_id(user[:domain]), + :domain_id => user[:domain], :id => user[:id] ) end @@ -171,34 +257,19 @@ def self.instances def self.prefetch(resources) users = instances - resources.keys.each do |name| - if provider = users.find{ |user| user.name == name } - resources[name].provider = provider + resources.each do |resname, resource| + # resname may be specified as just "name" or "name::domain" + name, resdomain = name_and_domain(resname, resource[:domain]) + provider = users.find do |user| + # have a match if the full instance name matches the full resource name, OR + # the base resource name matches the base instance name, and the + # resource domain matches the instance domain + username, user_domain = name_and_domain(user.name, user.domain) + (user.name == resname) || + ((username == name) && (user_domain == resdomain)) end + resource.provider = provider if provider end end - def self.set_project(newproject, name) - # some backends do not store the project/tenant in the user object, so we have to - # to modify the project/tenant instead - # First, see if the project actually needs to change - roles = request('user role', 'list', [name, '--project', newproject]) - unless roles.empty? - return # if already set, just skip - end - # Currently the only way to assign a user to a tenant not using user-create - # is to use user-role-add - this means we also need a role - there is usual - # a default role called _member_ which can be used for this purpose. 
What - # usually happens in a puppet module is that immediately after calling - # keystone_user, the module will then assign a role to that user. It is - # ok for a user to have the _member_ role and another role. - default_role = "_member_" - begin - request('role', 'show', [default_role]) - rescue - debug("Keystone role #{default_role} does not exist - creating") - request('role', 'create', [default_role]) - end - request('role', 'add', [default_role, '--project', newproject, '--user', name]) - end end diff --git a/keystone/lib/puppet/provider/keystone_user_role/openstack.rb b/keystone/lib/puppet/provider/keystone_user_role/openstack.rb index da2b87044..e670a6750 100644 --- a/keystone/lib/puppet/provider/keystone_user_role/openstack.rb +++ b/keystone/lib/puppet/provider/keystone_user_role/openstack.rb @@ -1,4 +1,5 @@ require 'puppet/provider/keystone' +require 'puppet/provider/keystone/util' Puppet::Type.type(:keystone_user_role).provide( :openstack, @@ -7,7 +8,7 @@ desc "Provider to manage keystone role assignments to users." - @credentials = Puppet::Provider::Openstack::CredentialsV2_0.new + @credentials = Puppet::Provider::Openstack::CredentialsV3.new def initialize(value={}) super(value) @@ -15,9 +16,6 @@ def initialize(value={}) end def create - properties = [] - properties << '--project' << get_project - properties << '--user' << get_user if resource[:roles] resource[:roles].each do |role| self.class.request('role', 'add', [role] + properties) @@ -26,9 +24,6 @@ def create end def destroy - properties = [] - properties << '--project' << get_project - properties << '--user' << get_user if @property_hash[:roles] @property_hash[:roles].each do |role| self.class.request('role', 'remove', [role] + properties) @@ -38,10 +33,8 @@ def destroy end def exists? - if @user_role_hash - return ! @property_hash[:name].empty? - else - roles = self.class.request('user role', 'list', [get_user, '--project', get_project]) + if self.class.user_role_hash.nil? || self.class.user_role_hash.empty? + roles = self.class.request('role', 'list', properties) # Since requesting every combination of users, roles, and # projects is so expensive, construct the property hash here # instead of in self.instances so it can be used in the role @@ -55,8 +48,8 @@ def exists? 
role[:name] end end - return @property_hash[:ensure] == :present end + return @property_hash[:ensure] == :present end def roles @@ -68,13 +61,11 @@ def roles=(value) # determine the roles to be added and removed remove = current_roles - Array(value) add = Array(value) - current_roles - user = get_user - project = get_project add.each do |role_name| - self.class.request('role', 'add', [role_name, '--project', project, '--user', user]) + self.class.request('role', 'add', [role_name] + properties) end remove.each do |role_name| - self.class.request('role', 'remove', [role_name, '--project', project, '--user', user]) + self.class.request('role', 'remove', [role_name] + properties) end end @@ -91,6 +82,19 @@ def self.instances private + def properties + properties = [] + if get_project_id + properties << '--project' << get_project_id + elsif get_domain + properties << '--domain' << get_domain + else + error("No project or domain specified for role") + end + properties << '--user' << get_user_id + properties + end + def get_user resource[:name].rpartition('@').first end @@ -99,12 +103,67 @@ def get_project resource[:name].rpartition('@').last end + # if the role is for a domain, it will be specified as + # user@::domain - the "project" part will be empty + def get_domain + # use defined because @domain may be nil + return @domain if defined?(@domain) + projname, domname = Util.split_domain(get_project) + if projname.nil? + @domain = domname # no project specified, so must be a domain + else + @domain = nil # not a domain specific role + end + @domain + end + + def get_user_id + @user_id ||= Puppet::Resource.indirection.find("Keystone_user/#{get_user}")[:id] + end + + def get_project_id + # use defined because @project_id may be nil + return @project_id if defined?(@project_id) + projname, domname = Util.split_domain(get_project) + if projname.nil? + @project_id = nil + else + @project_id ||= Puppet::Resource.indirection.find("Keystone_tenant/#{get_project}")[:id] + end + @project_id + end + def self.get_projects - request('project', 'list').collect { |project| project[:name] } + request('project', 'list', '--long').collect do |project| + { + :id => project[:id], + :name => project[:name], + :domain_id => project[:domain_id], + :domain => domain_name_from_id(project[:domain_id]) + } + end end - def self.get_users(project) - request('user', 'list', ['--project', project]).collect { |user| user[:name] } + def self.get_users(project_id=nil, domain_id=nil) + properties = ['--long'] + if project_id + properties << '--project' << project_id + elsif domain_id + properties << '--domain' << domain_id + end + request('user', 'list', properties).collect do |user| + { + :id => user[:id], + :name => user[:name], + # note - column is "Domain" but it is really the domain id + :domain_id => user[:domain], + :domain => domain_name_from_id(user[:domain]) + } + end + end + + def self.user_role_hash + @user_role_hash end def self.set_user_role_hash(user_role_hash) @@ -112,16 +171,32 @@ def self.set_user_role_hash(user_role_hash) end def self.build_user_role_hash - hash = @user_role_hash || {} + # The new hash will have the property that if the + # given key does not exist, create it with an empty + # array as the value for the hash key + hash = @user_role_hash || Hash.new{|h,k| h[k] = []} return hash unless hash.empty? 
- projects = get_projects - projects.each do |project| - users = get_users(project) - users.each do |user| - user_roles = request('user role', 'list', [user, '--project', project]) - hash["#{user}@#{project}"] = [] - user_roles.each do |role| - hash["#{user}@#{project}"] << role[:name] + # Need a mapping of project id to names. + project_hash = {} + Puppet::Type.type(:keystone_tenant).provider(:openstack).instances.each do |project| + project_hash[project.id] = project.name + end + # Need a mapping of user id to names. + user_hash = {} + Puppet::Type.type(:keystone_user).provider(:openstack).instances.each do |user| + user_hash[user.id] = user.name + end + # need a mapping of role id to name + role_hash = {} + request('role', 'list').each {|role| role_hash[role[:id]] = role[:name]} + # now, get all role assignments + request('role assignment', 'list').each do |assignment| + if assignment[:user] + if assignment[:project] + hash["#{user_hash[assignment[:user]]}@#{project_hash[assignment[:project]]}"] << role_hash[assignment[:role]] + else + domainname = domain_id_to_name(assignment[:domain]) + hash["#{user_hash[assignment[:user]]}@::#{domainname}"] << role_hash[assignment[:role]] end end end diff --git a/keystone/lib/puppet/type/keystone_domain.rb b/keystone/lib/puppet/type/keystone_domain.rb index 4a2d77736..687da2826 100644 --- a/keystone/lib/puppet/type/keystone_domain.rb +++ b/keystone/lib/puppet/type/keystone_domain.rb @@ -46,8 +46,8 @@ end # we should not do anything until the keystone service is started - autorequire(:service) do - 'keystone' + autorequire(:anchor) do + ['keystone_started'] end diff --git a/keystone/lib/puppet/type/keystone_endpoint.rb b/keystone/lib/puppet/type/keystone_endpoint.rb index 43c5eb2ea..55d69f427 100644 --- a/keystone/lib/puppet/type/keystone_endpoint.rb +++ b/keystone/lib/puppet/type/keystone_endpoint.rb @@ -31,8 +31,8 @@ end # we should not do anything until the keystone service is started - autorequire(:service) do - ['keystone'] + autorequire(:anchor) do + ['keystone_started'] end autorequire(:keystone_service) do diff --git a/keystone/lib/puppet/type/keystone_role.rb b/keystone/lib/puppet/type/keystone_role.rb index 3636afb27..15ec26a5c 100644 --- a/keystone/lib/puppet/type/keystone_role.rb +++ b/keystone/lib/puppet/type/keystone_role.rb @@ -22,7 +22,7 @@ end # we should not do anything until the keystone service is started - autorequire(:service) do - ['keystone'] + autorequire(:anchor) do + ['keystone_started'] end end diff --git a/keystone/lib/puppet/type/keystone_service.rb b/keystone/lib/puppet/type/keystone_service.rb index a4be4edae..5fb2933c3 100644 --- a/keystone/lib/puppet/type/keystone_service.rb +++ b/keystone/lib/puppet/type/keystone_service.rb @@ -35,7 +35,7 @@ # config is configured IF we need them for authentication. # If there is no keystone config, authentication credentials # need to come from another source. - autorequire(:service) do - ['keystone'] + autorequire(:anchor) do + ['keystone_started'] end end diff --git a/keystone/lib/puppet/type/keystone_tenant.rb b/keystone/lib/puppet/type/keystone_tenant.rb index 449ccd04e..4bddd8185 100644 --- a/keystone/lib/puppet/type/keystone_tenant.rb +++ b/keystone/lib/puppet/type/keystone_tenant.rb @@ -53,7 +53,7 @@ def insync?(is) # config is configured IF we need them for authentication. # If there is no keystone config, authentication credentials # need to come from another source. 
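# A hedged sketch of the role-assignment titles handled by the
# keystone_user_role provider above: 'user@project' grants project roles,
# while an empty project part ('user@::domain') grants a domain-scoped role,
# matching the heat_admin@::heat expectation earlier in this patch.
keystone_user_role { 'heat_admin@::heat':
  ensure => present,
  roles  => ['admin'],
}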
- autorequire(:service) do - ['keystone'] + autorequire(:anchor) do + ['keystone_started','default_domain_created'] end end diff --git a/keystone/lib/puppet/type/keystone_user.rb b/keystone/lib/puppet/type/keystone_user.rb index b484e7c5f..5c13d762c 100644 --- a/keystone/lib/puppet/type/keystone_user.rb +++ b/keystone/lib/puppet/type/keystone_user.rb @@ -2,6 +2,8 @@ File.expand_path('../..', File.dirname(__FILE__)).tap { |dir| $LOAD_PATH.unshift(dir) unless $LOAD_PATH.include?(dir) } File.expand_path('../../../../openstacklib/lib', File.dirname(__FILE__)).tap { |dir| $LOAD_PATH.unshift(dir) unless $LOAD_PATH.include?(dir) } +require 'puppet/provider/keystone/util' + Puppet::Type.newtype(:keystone_user) do desc 'Type for managing keystone users.' @@ -13,6 +15,11 @@ end newparam(:ignore_default_tenant) do + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 + validate do |v| + Puppet.warning('The ignore_default_tenant parameter is deprecated and will be removed in the future.') + end newvalues(/(t|T)rue/, /(f|F)alse/, true, false) defaultto(false) munge do |value| @@ -48,6 +55,11 @@ def should_to_s( newvalue ) end newproperty(:tenant) do + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 + validate do |v| + Puppet.warning('The tenant parameter is deprecated and will be removed in the future. Please use keystone_user_role to assign a user to a project.') + end newvalues(/\S+/) end @@ -69,12 +81,27 @@ def should_to_s( newvalue ) end end + newproperty(:domain) do + newvalues(nil, /\S+/) + def insync?(is) + raise(Puppet::Error, "The domain cannot be changed from #{self.should} to #{is}") unless self.should == is + true + end + end + autorequire(:keystone_tenant) do + # DEPRECATED - To be removed in next release (Liberty) + # https://bugs.launchpad.net/puppet-keystone/+bug/1472437 self[:tenant] end + autorequire(:keystone_domain) do + # use the domain parameter if given, or the one from name if any + self[:domain] or Util.split_domain(self[:name])[1] + end + # we should not do anything until the keystone service is started - autorequire(:service) do - ['keystone'] + autorequire(:anchor) do + ['keystone_started','default_domain_created'] end end diff --git a/keystone/lib/puppet/type/keystone_user_role.rb b/keystone/lib/puppet/type/keystone_user_role.rb index 502dc3976..28be7fdf2 100644 --- a/keystone/lib/puppet/type/keystone_user_role.rb +++ b/keystone/lib/puppet/type/keystone_user_role.rb @@ -2,6 +2,8 @@ File.expand_path('../..', File.dirname(__FILE__)).tap { |dir| $LOAD_PATH.unshift(dir) unless $LOAD_PATH.include?(dir) } File.expand_path('../../../../openstacklib/lib', File.dirname(__FILE__)).tap { |dir| $LOAD_PATH.unshift(dir) unless $LOAD_PATH.include?(dir) } +require 'puppet/provider/keystone/util' + Puppet::Type.newtype(:keystone_user_role) do desc <<-EOT @@ -31,15 +33,33 @@ def insync?(is) end autorequire(:keystone_tenant) do - self[:name].rpartition('@').last + proj, dom = Util.split_domain(self[:name].rpartition('@').last) + rv = nil + if proj # i.e. 
not ::domain + rv = self[:name].rpartition('@').last + end + rv end autorequire(:keystone_role) do self[:roles] end + autorequire(:keystone_domain) do + rv = [] + userdom = Util.split_domain(self[:name].rpartition('@').first)[1] + if userdom + rv << userdom + end + projectdom = Util.split_domain(self[:name].rpartition('@').last)[1] + if projectdom + rv << projectdom + end + rv + end + # we should not do anything until the keystone service is started - autorequire(:service) do - ['keystone'] + autorequire(:anchor) do + ['keystone_started'] end end diff --git a/keystone/manifests/cron/token_flush.pp b/keystone/manifests/cron/token_flush.pp index 331eeba56..6dba32b47 100644 --- a/keystone/manifests/cron/token_flush.pp +++ b/keystone/manifests/cron/token_flush.pp @@ -45,14 +45,19 @@ # Induces a random delay before running the cronjob to avoid running all # cron jobs at the same time on all hosts this job is configured. # +# [*destination*] +# (optional) Path to file to which rows should be archived +# Defaults to '/var/log/keystone/keystone-tokenflush.log'. +# class keystone::cron::token_flush ( - $ensure = present, - $minute = 1, - $hour = 0, - $monthday = '*', - $month = '*', - $weekday = '*', - $maxdelay = 0, + $ensure = present, + $minute = 1, + $hour = 0, + $monthday = '*', + $month = '*', + $weekday = '*', + $maxdelay = 0, + $destination = '/var/log/keystone/keystone-tokenflush.log' ) { if $maxdelay == 0 { @@ -63,7 +68,7 @@ cron { 'keystone-manage token_flush': ensure => $ensure, - command => "${sleep}keystone-manage token_flush >>/var/log/keystone/keystone-tokenflush.log 2>&1", + command => "${sleep}keystone-manage token_flush >>${destination} 2>&1", environment => 'PATH=/bin:/usr/bin:/usr/sbin SHELL=/bin/sh', user => 'keystone', minute => $minute, diff --git a/keystone/manifests/endpoint.pp b/keystone/manifests/endpoint.pp index 6c821f475..ddf5c1368 100644 --- a/keystone/manifests/endpoint.pp +++ b/keystone/manifests/endpoint.pp @@ -22,6 +22,20 @@ # [*version*] # (optional) API version for endpoint. Appended to all endpoint urls. (Defaults to 'v2.0') # +# [*user_domain*] +# (Optional) Domain for $auth_name +# Defaults to undef (use the keystone server default domain) +# +# [*project_domain*] +# (Optional) Domain for $tenant (project) +# Defaults to undef (use the keystone server default domain) +# +# [*default_domain*] +# (Optional) Domain for $auth_name and $tenant (project) +# If keystone_user_domain is not specified, use $keystone_default_domain +# If keystone_project_domain is not specified, use $keystone_default_domain +# Defaults to undef +# # === Examples # # class { 'keystone::endpoint': @@ -36,6 +50,9 @@ $admin_url = 'http://127.0.0.1:35357', $version = 'v2.0', $region = 'RegionOne', + $user_domain = undef, + $project_domain = undef, + $default_domain = undef, ) { $public_url_real = "${public_url}/${version}" @@ -56,6 +73,9 @@ admin_url => $admin_url_real, internal_url => $internal_url_real, region => $region, + user_domain => $user_domain, + project_domain => $project_domain, + default_domain => $default_domain, } } diff --git a/keystone/manifests/init.pp b/keystone/manifests/init.pp index 20c2011e3..fa1c391c3 100644 --- a/keystone/manifests/init.pp +++ b/keystone/manifests/init.pp @@ -182,6 +182,21 @@ # (optional) The RabbitMQ virtual host. # Defaults to /. # +# [*rabbit_heartbeat_timeout_threshold*] +# (optional) Number of seconds after which the RabbitMQ broker is considered +# down if the heartbeat keepalive fails. Any value >0 enables heartbeats. 
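# A hedged usage sketch for the keystone::cron::token_flush and
# keystone::endpoint changes above; parameter values are illustrative only.
class { '::keystone::cron::token_flush':
  maxdelay    => 3600,
  destination => '/var/log/keystone/keystone-tokenflush.log',
}
class { '::keystone::endpoint':
  public_url     => 'http://127.0.0.1:5000/',
  admin_url      => 'http://127.0.0.1:35357/',
  default_domain => 'admin_domain',
}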
+# Heartbeating helps to ensure the TCP connection to RabbitMQ isn't silently +# closed, resulting in missed or lost messages from the queue. +# (Requires kombu >= 3.0.7 and amqp >= 1.4.0) +# Defaults to 0 +# +# [*rabbit_heartbeat_rate*] +# (optional) How often during the rabbit_heartbeat_timeout_threshold period to +# check the heartbeat on RabbitMQ connection. (i.e. rabbit_heartbeat_rate=2 +# when rabbit_heartbeat_timeout_threshold=60, the heartbeat will be checked +# every 30 seconds. +# Defaults to 2 +# # [*rabbit_use_ssl*] # (optional) Connect over SSL for RabbitMQ # Defaults to false @@ -363,6 +378,14 @@ # (Optional) Number of maximum active Fernet keys. Integer > 0. # Defaults to undef # +# [*default_domain*] +# (optional) When Keystone v3 support is enabled, v2 clients will need +# to have a domain assigned for certain operations. For example, +# doing a user create operation must have a domain associated with it. +# This is the domain which will be used if a domain is needed and not +# explicitly set in the request. +# Defaults to undef (will use built-in Keystone default) +# # == Dependencies # None # @@ -394,82 +417,85 @@ # class keystone( $admin_token, - $package_ensure = 'present', - $client_package_ensure = 'present', - $public_bind_host = '0.0.0.0', - $admin_bind_host = '0.0.0.0', - $public_port = '5000', - $admin_port = '35357', - $verbose = false, - $debug = false, - $log_dir = '/var/log/keystone', - $log_file = false, - $use_syslog = false, - $log_facility = 'LOG_USER', - $catalog_type = 'sql', - $catalog_driver = false, - $catalog_template_file = '/etc/keystone/default_catalog.templates', - $token_provider = 'keystone.token.providers.uuid.Provider', - $token_driver = 'keystone.token.persistence.backends.sql.Token', - $token_expiration = 3600, - $revoke_driver = 'keystone.contrib.revoke.backends.sql.Revoke', - $public_endpoint = false, - $admin_endpoint = false, - $enable_ssl = false, - $ssl_certfile = '/etc/keystone/ssl/certs/keystone.pem', - $ssl_keyfile = '/etc/keystone/ssl/private/keystonekey.pem', - $ssl_ca_certs = '/etc/keystone/ssl/certs/ca.pem', - $ssl_ca_key = '/etc/keystone/ssl/private/cakey.pem', - $ssl_cert_subject = '/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', - $cache_dir = '/var/cache/keystone', - $memcache_servers = false, - $manage_service = true, - $cache_backend = 'keystone.common.cache.noop', - $cache_backend_argument = undef, - $debug_cache_backend = false, - $token_caching = true, - $enabled = true, - $database_connection = 'sqlite:////var/lib/keystone/keystone.db', - $database_idle_timeout = '200', - $enable_pki_setup = true, - $signing_certfile = '/etc/keystone/ssl/certs/signing_cert.pem', - $signing_keyfile = '/etc/keystone/ssl/private/signing_key.pem', - $signing_ca_certs = '/etc/keystone/ssl/certs/ca.pem', - $signing_ca_key = '/etc/keystone/ssl/private/cakey.pem', - $signing_cert_subject = '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com', - $signing_key_size = 2048, - $rabbit_host = 'localhost', - $rabbit_hosts = false, - $rabbit_password = 'guest', - $rabbit_port = '5672', - $rabbit_userid = 'guest', - $rabbit_virtual_host = '/', - $rabbit_use_ssl = false, - $kombu_ssl_ca_certs = undef, - $kombu_ssl_certfile = undef, - $kombu_ssl_keyfile = undef, - $kombu_ssl_version = 'TLSv1', - $notification_driver = false, - $notification_topics = false, - $notification_format = undef, - $control_exchange = false, - $validate_service = false, - $validate_insecure = false, - $validate_auth_url = false, - $validate_cacert = undef, - $paste_config = 
$::keystone::params::paste_config, - $service_provider = $::keystone::params::service_provider, - $service_name = $::keystone::params::service_name, - $max_token_size = undef, - $admin_workers = max($::processorcount, 2), - $public_workers = max($::processorcount, 2), - $sync_db = true, - $enable_fernet_setup = false, - $fernet_key_repository = '/etc/keystone/fernet-keys', - $fernet_max_active_keys = undef, + $package_ensure = 'present', + $client_package_ensure = 'present', + $public_bind_host = '0.0.0.0', + $admin_bind_host = '0.0.0.0', + $public_port = '5000', + $admin_port = '35357', + $verbose = false, + $debug = false, + $log_dir = '/var/log/keystone', + $log_file = false, + $use_syslog = false, + $log_facility = 'LOG_USER', + $catalog_type = 'sql', + $catalog_driver = false, + $catalog_template_file = '/etc/keystone/default_catalog.templates', + $token_provider = 'keystone.token.providers.uuid.Provider', + $token_driver = 'keystone.token.persistence.backends.sql.Token', + $token_expiration = 3600, + $revoke_driver = 'keystone.contrib.revoke.backends.sql.Revoke', + $public_endpoint = false, + $admin_endpoint = false, + $enable_ssl = false, + $ssl_certfile = '/etc/keystone/ssl/certs/keystone.pem', + $ssl_keyfile = '/etc/keystone/ssl/private/keystonekey.pem', + $ssl_ca_certs = '/etc/keystone/ssl/certs/ca.pem', + $ssl_ca_key = '/etc/keystone/ssl/private/cakey.pem', + $ssl_cert_subject = '/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', + $cache_dir = '/var/cache/keystone', + $memcache_servers = false, + $manage_service = true, + $cache_backend = 'keystone.common.cache.noop', + $cache_backend_argument = undef, + $debug_cache_backend = false, + $token_caching = true, + $enabled = true, + $database_connection = 'sqlite:////var/lib/keystone/keystone.db', + $database_idle_timeout = '200', + $enable_pki_setup = true, + $signing_certfile = '/etc/keystone/ssl/certs/signing_cert.pem', + $signing_keyfile = '/etc/keystone/ssl/private/signing_key.pem', + $signing_ca_certs = '/etc/keystone/ssl/certs/ca.pem', + $signing_ca_key = '/etc/keystone/ssl/private/cakey.pem', + $signing_cert_subject = '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com', + $signing_key_size = 2048, + $rabbit_host = 'localhost', + $rabbit_hosts = false, + $rabbit_password = 'guest', + $rabbit_port = '5672', + $rabbit_userid = 'guest', + $rabbit_virtual_host = '/', + $rabbit_heartbeat_timeout_threshold = 0, + $rabbit_heartbeat_rate = 2, + $rabbit_use_ssl = false, + $kombu_ssl_ca_certs = undef, + $kombu_ssl_certfile = undef, + $kombu_ssl_keyfile = undef, + $kombu_ssl_version = 'TLSv1', + $notification_driver = false, + $notification_topics = false, + $notification_format = undef, + $control_exchange = false, + $validate_service = false, + $validate_insecure = false, + $validate_auth_url = false, + $validate_cacert = undef, + $paste_config = $::keystone::params::paste_config, + $service_provider = $::keystone::params::service_provider, + $service_name = $::keystone::params::service_name, + $max_token_size = undef, + $admin_workers = max($::processorcount, 2), + $public_workers = max($::processorcount, 2), + $sync_db = true, + $enable_fernet_setup = false, + $fernet_key_repository = '/etc/keystone/fernet-keys', + $fernet_max_active_keys = undef, + $default_domain = undef, # DEPRECATED PARAMETERS - $mysql_module = undef, - $compute_port = undef, + $mysql_module = undef, + $compute_port = undef, ) inherits keystone::params { if ! 
$catalog_driver { @@ -756,35 +782,37 @@ } keystone_config { - 'DEFAULT/rabbit_password': value => $rabbit_password, secret => true; - 'DEFAULT/rabbit_userid': value => $rabbit_userid; - 'DEFAULT/rabbit_virtual_host': value => $rabbit_virtual_host; + 'oslo_messaging_rabbit/rabbit_password': value => $rabbit_password, secret => true; + 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; + 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; + 'oslo_messaging_rabbit/heartbeat_timeout_threshold': value => $rabbit_heartbeat_timeout_threshold; + 'oslo_messaging_rabbit/heartbeat_rate': value => $rabbit_heartbeat_rate; } if $rabbit_hosts { - keystone_config { 'DEFAULT/rabbit_hosts': value => join($rabbit_hosts, ',') } - keystone_config { 'DEFAULT/rabbit_ha_queues': value => true } + keystone_config { 'oslo_messaging_rabbit/rabbit_hosts': value => join($rabbit_hosts, ',') } + keystone_config { 'oslo_messaging_rabbit/rabbit_ha_queues': value => true } } else { - keystone_config { 'DEFAULT/rabbit_host': value => $rabbit_host } - keystone_config { 'DEFAULT/rabbit_port': value => $rabbit_port } - keystone_config { 'DEFAULT/rabbit_hosts': value => "${rabbit_host}:${rabbit_port}" } - keystone_config { 'DEFAULT/rabbit_ha_queues': value => false } + keystone_config { 'oslo_messaging_rabbit/rabbit_host': value => $rabbit_host } + keystone_config { 'oslo_messaging_rabbit/rabbit_port': value => $rabbit_port } + keystone_config { 'oslo_messaging_rabbit/rabbit_hosts': value => "${rabbit_host}:${rabbit_port}" } + keystone_config { 'oslo_messaging_rabbit/rabbit_ha_queues': value => false } } - keystone_config { 'DEFAULT/rabbit_use_ssl': value => $rabbit_use_ssl } + keystone_config { 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl } if $rabbit_use_ssl { keystone_config { - 'DEFAULT/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; - 'DEFAULT/kombu_ssl_certfile': value => $kombu_ssl_certfile; - 'DEFAULT/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; - 'DEFAULT/kombu_ssl_version': value => $kombu_ssl_version; + 'oslo_messaging_rabbit/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs; + 'oslo_messaging_rabbit/kombu_ssl_certfile': value => $kombu_ssl_certfile; + 'oslo_messaging_rabbit/kombu_ssl_keyfile': value => $kombu_ssl_keyfile; + 'oslo_messaging_rabbit/kombu_ssl_version': value => $kombu_ssl_version; } } else { keystone_config { - 'DEFAULT/kombu_ssl_ca_certs': ensure => absent; - 'DEFAULT/kombu_ssl_certfile': ensure => absent; - 'DEFAULT/kombu_ssl_keyfile': ensure => absent; - 'DEFAULT/kombu_ssl_version': ensure => absent; + 'oslo_messaging_rabbit/kombu_ssl_ca_certs': ensure => absent; + 'oslo_messaging_rabbit/kombu_ssl_certfile': ensure => absent; + 'oslo_messaging_rabbit/kombu_ssl_keyfile': ensure => absent; + 'oslo_messaging_rabbit/kombu_ssl_version': ensure => absent; } } @@ -804,6 +832,7 @@ } if $service_name == $::keystone::params::service_name { + $service_name_real = $::keystone::params::service_name if $validate_service { if $validate_auth_url { $v_auth_url = $validate_auth_url @@ -836,6 +865,7 @@ } } } elsif $service_name == 'httpd' { + include ::apache::params class { '::keystone::service': ensure => 'stopped', service_name => $::keystone::params::service_name, @@ -843,6 +873,7 @@ provider => $service_provider, validate => false, } + $service_name_real = $::apache::params::service_name } else { fail('Invalid service_name. 
Either keystone/openstack-keystone for running as a standalone service, or httpd for being run by a httpd server') } @@ -927,4 +958,33 @@ } } + if $default_domain { + keystone_domain { $default_domain: + ensure => present, + enabled => true, + is_default => true, + require => File['/etc/keystone/keystone.conf'], + notify => Exec['restart_keystone'], + } + anchor { 'default_domain_created': + require => Keystone_domain[$default_domain], + } + # Update this code when https://bugs.launchpad.net/keystone/+bug/1472285 is addressed. + # 1/ Keystone needs to be started before creating the default domain + # 2/ Once the default domain is created, we can query Keystone to get the default domain ID + # 3/ The Keystone_domain provider has in charge of doing the query and configure keystone.conf + # 4/ After such a change, we need to restart Keystone service. + # restart_keystone exec is doing 4/, it restart Keystone if we have a new default domain setted + # and if we manage the service to be enabled. + if $manage_service and $enabled { + exec { 'restart_keystone': + path => ['/usr/sbin', '/usr/bin', '/sbin', '/bin/'], + command => "service ${service_name_real} restart", + refreshonly => true, + } + } + } + anchor { 'keystone_started': + require => Service[$service_name] + } } diff --git a/keystone/manifests/resource/authtoken.pp b/keystone/manifests/resource/authtoken.pp new file mode 100644 index 000000000..5165abb3a --- /dev/null +++ b/keystone/manifests/resource/authtoken.pp @@ -0,0 +1,253 @@ +# == Definition: keystone::resource::authtoken +# +# This resource configures Keystone authentication resources for an OpenStack +# service. It will manage the [keystone_authtoken] section in the given +# config resource. It supports all of the authentication parameters specified +# at http://www.jamielennox.net/blog/2015/02/17/loading-authentication-plugins/ +# with the addition of the default domain for user and project. +# +# The username and project_name parameters may be given in the form +# "name::domainname". The authtoken resource will use the domains in +# the following order: +# 1) The given domain parameter (user_domain_name or project_domain_name) +# 2) The domain given as the "::domainname" part of username or project_name +# 3) The default_domain_name +# +# For example, instead of doing this:: +# +# glance_api_config { +# 'keystone_authtoken/admin_tenant_name': value => $keystone_tenant; +# 'keystone_authtoken/admin_user' : value => $keystone_user; +# 'keystone_authtoken/admin_password' : value => $keystone_password; +# secret => true; +# ... +# } +# +# manifests should do this instead:: +# +# keystone::resource::authtoken { 'glance_api_config': +# username => $keystone_user, +# password => $keystone_password, +# auth_url => $real_identity_uri, +# project_name => $keystone_tenant, +# user_domain_name => $keystone_user_domain, +# project_domain_name => $keystone_project_domain, +# default_domain_name => $keystone_default_domain, +# cacert => $ca_file, +# ... +# } +# +# The use of `keystone::resource::authtoken` makes it easy to avoid mistakes, +# and makes it easier to support some of the newer authentication types coming +# with Keystone Kilo and later, such as Kerberos, Federation, etc. +# +# == Parameters: +# +# [*name*] +# The name of the resource corresponding to the config file. For example, +# keystone::resource::authtoken { 'glance_api_config': ... } +# Where 'glance_api_config' is the name of the resource used to manage +# the glance api configuration. 
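As a sketch of the domain resolution order described above (an explicit *_domain_name parameter wins, then the '::domainname' suffix on the name, then default_domain_name), a hypothetical declaration might look as follows; 'nova_config' and the credential values are placeholders and not part of this change.

```puppet
# Hypothetical usage of the name::domain convention.
keystone::resource::authtoken { 'nova_config':
  username            => 'nova::service_domain',    # user_domain_name resolves to 'service_domain'
  password            => 'a_big_secret',
  auth_url            => 'http://127.0.0.1:35357/',
  project_name        => 'services',                # no '::' suffix here, so ...
  default_domain_name => 'Default',                 # ... project_domain_name falls back to 'Default'
}
```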
+# string; required +# +# [*username*] +# The name of the service user; +# string; required +# +# [*password*] +# Password to create for the service user; +# string; required +# +# [*auth_url*] +# The URL to use for authentication. +# string; required +# +# [*auth_plugin*] +# The plugin to use for authentication. +# string; optional: default to 'password' +# +# [*user_id*] +# The ID of the service user; +# string; optional: default to undef +# +# [*user_domain_name*] +# (Optional) Name of domain for $username +# Defaults to undef +# +# [*user_domain_id*] +# (Optional) ID of domain for $username +# Defaults to undef +# +# [*project_name*] +# Service project name; +# string; optional: default to undef +# +# [*project_id*] +# Service project ID; +# string; optional: default to undef +# +# [*project_domain_name*] +# (Optional) Name of domain for $project_name +# Defaults to undef +# +# [*project_domain_id*] +# (Optional) ID of domain for $project_name +# Defaults to undef +# +# [*domain_name*] +# (Optional) Use this for auth to obtain a domain-scoped token. +# If using this option, do not specify $project_name or $project_id. +# Defaults to undef +# +# [*domain_id*] +# (Optional) Use this for auth to obtain a domain-scoped token. +# If using this option, do not specify $project_name or $project_id. +# Defaults to undef +# +# [*default_domain_name*] +# (Optional) Name of domain for $username and $project_name +# If user_domain_name is not specified, use $default_domain_name +# If project_domain_name is not specified, use $default_domain_name +# Defaults to undef +# +# [*default_domain_id*] +# (Optional) ID of domain for $user_id and $project_id +# If user_domain_id is not specified, use $default_domain_id +# If project_domain_id is not specified, use $default_domain_id +# Defaults to undef +# +# [*trust_id*] +# (Optional) Trust ID +# Defaults to undef +# +# [*cacert*] +# (Optional) CA certificate file for TLS (https) +# Defaults to undef +# +# [*cert*] +# (Optional) Certificate file for TLS (https) +# Defaults to undef +# +# [*key*] +# (Optional) Key file for TLS (https) +# Defaults to undef +# +# [*insecure*] +# If true, explicitly allow TLS without checking server cert against any +# certificate authorities. WARNING: not recommended. Use with caution. 
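For the TLS-related parameters, a minimal sketch of keeping server certificate verification enabled against an internal CA instead of setting insecure; the CA path and the https endpoint are assumptions.

```puppet
# Sketch only: cacert points verification at an assumed internal CA bundle;
# insecure => true would skip verification entirely and is discouraged.
keystone::resource::authtoken { 'glance_api_config':
  username     => 'glance',
  password     => 'a_big_secret',
  auth_url     => 'https://keystone.example.com:35357/',
  project_name => 'services',
  cacert       => '/etc/pki/tls/certs/internal-ca.pem',
  insecure     => false,
}
```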
+# boolean; Defaults to false (which means be secure) +# +define keystone::resource::authtoken( + $username, + $password, + $auth_url, + $auth_plugin = 'password', + $user_id = undef, + $user_domain_name = undef, + $user_domain_id = undef, + $project_name = undef, + $project_id = undef, + $project_domain_name = undef, + $project_domain_id = undef, + $domain_name = undef, + $domain_id = undef, + $default_domain_name = undef, + $default_domain_id = undef, + $trust_id = undef, + $cacert = undef, + $cert = undef, + $key = undef, + $insecure = false, +) { + + if !$project_name and !$project_id and !$domain_name and !$domain_id { + fail('Must specify either a project (project_name or project_id, for a project scoped token) or a domain (domain_name or domain_id, for a domain scoped token)') + } + + if ($project_name or $project_id) and ($domain_name or $domain_id) { + fail('Cannot specify both a project (project_name or project_id) and a domain (domain_name or domain_id)') + } + + $user_and_domain_array = split($username, '::') + $real_username = $user_and_domain_array[0] + $real_user_domain_name = pick($user_domain_name, $user_and_domain_array[1], $default_domain_name, '__nodomain__') + + $project_and_domain_array = split($project_name, '::') + $real_project_name = $project_and_domain_array[0] + $real_project_domain_name = pick($project_domain_name, $project_and_domain_array[1], $default_domain_name, '__nodomain__') + + create_resources($name, {'keystone_authtoken/auth_plugin' => {'value' => $auth_plugin}}) + create_resources($name, {'keystone_authtoken/auth_url' => {'value' => $auth_url}}) + create_resources($name, {'keystone_authtoken/username' => {'value' => $real_username}}) + create_resources($name, {'keystone_authtoken/password' => {'value' => $password, 'secret' => true}}) + if $user_id { + create_resources($name, {'keystone_authtoken/user_id' => {'value' => $user_id}}) + } else { + create_resources($name, {'keystone_authtoken/user_id' => {'ensure' => 'absent'}}) + } + if $real_user_domain_name == '__nodomain__' { + create_resources($name, {'keystone_authtoken/user_domain_name' => {'ensure' => 'absent'}}) + } else { + create_resources($name, {'keystone_authtoken/user_domain_name' => {'value' => $real_user_domain_name}}) + } + if $user_domain_id { + create_resources($name, {'keystone_authtoken/user_domain_id' => {'value' => $user_domain_id}}) + } elsif $default_domain_id { + create_resources($name, {'keystone_authtoken/user_domain_id' => {'value' => $default_domain_id}}) + } else { + create_resources($name, {'keystone_authtoken/user_domain_id' => {'ensure' => 'absent'}}) + } + if $project_name { + create_resources($name, {'keystone_authtoken/project_name' => {'value' => $real_project_name}}) + } else { + create_resources($name, {'keystone_authtoken/project_name' => {'ensure' => 'absent'}}) + } + if $project_id { + create_resources($name, {'keystone_authtoken/project_id' => {'value' => $project_id}}) + } else { + create_resources($name, {'keystone_authtoken/project_id' => {'ensure' => 'absent'}}) + } + if $real_project_domain_name == '__nodomain__' { + create_resources($name, {'keystone_authtoken/project_domain_name' => {'ensure' => 'absent'}}) + } else { + create_resources($name, {'keystone_authtoken/project_domain_name' => {'value' => $real_project_domain_name}}) + } + if $project_domain_id { + create_resources($name, {'keystone_authtoken/project_domain_id' => {'value' => $project_domain_id}}) + } elsif $default_domain_id { + create_resources($name, {'keystone_authtoken/project_domain_id' 
=> {'value' => $default_domain_id}}) + } else { + create_resources($name, {'keystone_authtoken/project_domain_id' => {'ensure' => 'absent'}}) + } + if $domain_name { + create_resources($name, {'keystone_authtoken/domain_name' => {'value' => $domain_name}}) + } else { + create_resources($name, {'keystone_authtoken/domain_name' => {'ensure' => 'absent'}}) + } + if $domain_id { + create_resources($name, {'keystone_authtoken/domain_id' => {'value' => $domain_id}}) + } else { + create_resources($name, {'keystone_authtoken/domain_id' => {'ensure' => 'absent'}}) + } + if $trust_id { + create_resources($name, {'keystone_authtoken/trust_id' => {'value' => $trust_id}}) + } else { + create_resources($name, {'keystone_authtoken/trust_id' => {'ensure' => 'absent'}}) + } + if $cacert { + create_resources($name, {'keystone_authtoken/cacert' => {'value' => $cacert}}) + } else { + create_resources($name, {'keystone_authtoken/cacert' => {'ensure' => 'absent'}}) + } + if $cert { + create_resources($name, {'keystone_authtoken/cert' => {'value' => $cert}}) + } else { + create_resources($name, {'keystone_authtoken/cert' => {'ensure' => 'absent'}}) + } + if $key { + create_resources($name, {'keystone_authtoken/key' => {'value' => $key}}) + } else { + create_resources($name, {'keystone_authtoken/key' => {'ensure' => 'absent'}}) + } + create_resources($name, {'keystone_authtoken/insecure' => {'value' => $insecure}}) +} diff --git a/keystone/manifests/resource/service_identity.pp b/keystone/manifests/resource/service_identity.pp index 9bbd1b13a..4ac132205 100644 --- a/keystone/manifests/resource/service_identity.pp +++ b/keystone/manifests/resource/service_identity.pp @@ -69,10 +69,6 @@ # List of roles; # string; optional: default to ['admin'] # -# [*domain*] -# User domain (keystone v3), not implemented yet. -# string; optional: default to undef -# # [*email*] # Service email; # string; optional: default to '$auth_name@localhost' @@ -93,6 +89,20 @@ # Whether to create the service. 
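The user_domain/project_domain handling documented below is exercised by the acceptance tests later in this change; a condensed sketch of such a declaration, with the service name, password and URLs as placeholders, looks like this.

```puppet
# Condensed sketch mirroring the v3 acceptance example; values are placeholders.
::keystone::resource::service_identity { 'examplesvc':
  service_type        => 'example',
  service_description => 'example service',
  password            => 'secret',
  tenant              => 'servicesv3',
  public_url          => 'http://127.0.0.1:1234/v3',
  admin_url           => 'http://127.0.0.1:1234/v3',
  internal_url        => 'http://127.0.0.1:1234/v3',
  user_domain         => 'service_domain',
  project_domain      => 'service_domain',
}
```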
# string; optional: default to True # +# [*user_domain*] +# (Optional) Domain for $auth_name +# Defaults to undef (use the keystone server default domain) +# +# [*project_domain*] +# (Optional) Domain for $tenant (project) +# Defaults to undef (use the keystone server default domain) +# +# [*default_domain*] +# (Optional) Domain for $auth_name and $tenant (project) +# If keystone_user_domain is not specified, use $keystone_default_domain +# If keystone_project_domain is not specified, use $keystone_default_domain +# Defaults to undef +# define keystone::resource::service_identity( $admin_url = false, $internal_url = false, @@ -104,7 +114,6 @@ $configure_user = true, $configure_user_role = true, $configure_service = true, - $domain = undef, $email = "${name}@localhost", $region = 'RegionOne', $service_name = undef, @@ -112,19 +121,32 @@ $tenant = 'services', $ignore_default_tenant = false, $roles = ['admin'], + $user_domain = undef, + $project_domain = undef, + $default_domain = undef, ) { - - if $domain { - warning('Keystone domains are not yet managed by puppet-keystone.') - } - if $service_name == undef { $service_name_real = $auth_name } else { $service_name_real = $service_name } + if $user_domain == undef { + $user_domain_real = $default_domain + } else { + $user_domain_real = $user_domain + } + if $configure_user { + if $user_domain_real { + # We have to use ensure_resource here and hope for the best, because we have + # no way to know if the $user_domain is the same domain passed as the + # $default_domain parameter to class keystone. + ensure_resource('keystone_domain', $user_domain_real, { + 'ensure' => 'present', + 'enabled' => true, + }) + } ensure_resource('keystone_user', $auth_name, { 'ensure' => 'present', 'enabled' => true, @@ -132,6 +154,7 @@ 'email' => $email, 'tenant' => $tenant, 'ignore_default_tenant' => $ignore_default_tenant, + 'domain' => $user_domain_real, }) } @@ -140,9 +163,6 @@ 'ensure' => 'present', 'roles' => $roles, }) - if $configure_user { - Keystone_user[$auth_name] -> Keystone_user_role["${auth_name}@${tenant}"] - } } if $configure_service { diff --git a/keystone/manifests/roles/admin.pp b/keystone/manifests/roles/admin.pp index aa5abd72f..37758c812 100644 --- a/keystone/manifests/roles/admin.pp +++ b/keystone/manifests/roles/admin.pp @@ -51,7 +51,19 @@ # # [*configure_user_role*] # Optional. Should the admin role be configured for the admin user? -# Defaulst to 'true'. +# Defaults to 'true'. +# +# [*admin_user_domain*] +# Optional. Domain of the admin user +# Defaults to undef (undef will resolve to class keystone $default_domain) +# +# [*admin_project_domain*] +# Optional. Domain of the admin tenant +# Defaults to undef (undef will resolve to class keystone $default_domain) +# +# [*service_project_domain*] +# Optional. 
Domain for $service_tenant +# Defaults to undef (undef will resolve to class keystone $default_domain) # # == Dependencies # == Examples @@ -75,17 +87,28 @@ $service_tenant_desc = 'Tenant for the openstack services', $configure_user = true, $configure_user_role = true, + $admin_user_domain = undef, + $admin_project_domain = undef, + $service_project_domain = undef, ) { + $domains = unique(delete_undef_values([ $admin_user_domain, $admin_project_domain, $service_project_domain])) + keystone_domain { $domains: + ensure => present, + enabled => true, + } + keystone_tenant { $service_tenant: ensure => present, enabled => true, description => $service_tenant_desc, + domain => $service_project_domain, } keystone_tenant { $admin_tenant: ensure => present, enabled => true, description => $admin_tenant_desc, + domain => $admin_project_domain, } keystone_role { 'admin': ensure => present, @@ -98,6 +121,7 @@ tenant => $admin_tenant, email => $email, password => $password, + domain => $admin_user_domain, ignore_default_tenant => $ignore_default_tenant, } } diff --git a/keystone/metadata.json b/keystone/metadata.json index 77e23afd4..5a2e3ff61 100644 --- a/keystone/metadata.json +++ b/keystone/metadata.json @@ -1,6 +1,6 @@ { - "name": "stackforge-keystone", - "version": "5.1.0", + "name": "openstack-keystone", + "version": "6.0.0", "author": "Puppet Labs and OpenStack Contributors", "summary": "Puppet module for OpenStack Keystone", "license": "Apache-2.0", @@ -34,6 +34,6 @@ { "name": "puppetlabs/apache", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0 <6.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } ] } diff --git a/keystone/spec/acceptance/basic_keystone_spec.rb b/keystone/spec/acceptance/basic_keystone_spec.rb index b0fd8d398..f8aa5023f 100644 --- a/keystone/spec/acceptance/basic_keystone_spec.rb +++ b/keystone/spec/acceptance/basic_keystone_spec.rb @@ -42,14 +42,17 @@ class { '::keystone': database_connection => 'mysql://keystone:keystone@127.0.0.1/keystone', admin_token => 'admin_token', enabled => true, + default_domain => 'default_domain', } + # "v2" admin and service class { '::keystone::roles::admin': - email => 'test@example.tld', - password => 'a_big_secret', + email => 'test@example.tld', + password => 'a_big_secret', } class { '::keystone::endpoint': - public_url => "http://127.0.0.1:5000/", - admin_url => "http://127.0.0.1:35357/", + public_url => "http://127.0.0.1:5000/", + admin_url => "http://127.0.0.1:35357/", + default_domain => 'admin', } ::keystone::resource::service_identity { 'beaker-ci': service_type => 'beaker', @@ -60,6 +63,56 @@ class { '::keystone::endpoint': admin_url => 'http://127.0.0.1:1234', internal_url => 'http://127.0.0.1:1234', } + # v3 admin + # we don't use ::keystone::roles::admin but still create resources manually: + keystone_domain { 'admin_domain': + ensure => present, + enabled => true, + description => 'Domain for admin v3 users', + } + keystone_domain { 'service_domain': + ensure => present, + enabled => true, + description => 'Domain for admin v3 users', + } + keystone_tenant { 'servicesv3': + ensure => present, + enabled => true, + description => 'Tenant for the openstack services', + domain => 'service_domain', + } + keystone_tenant { 'openstackv3': + ensure => present, + enabled => true, + description 
=> 'admin tenant', + domain => 'admin_domain', + } + keystone_user { 'adminv3': + ensure => present, + enabled => true, + tenant => 'openstackv3', # note: don't have to use 'openstackv3::admin_domain' here since the tenant name 'openstackv3' is unique among all domains + email => 'test@example.tld', + password => 'a_big_secret', + domain => 'admin_domain', + } + keystone_user_role { 'adminv3@openstackv3': + ensure => present, + roles => ['admin'], + } + # service user exists only in the service_domain - must + # use v3 api + ::keystone::resource::service_identity { 'beaker-civ3': + service_type => 'beakerv3', + service_description => 'beakerv3 service', + service_name => 'beakerv3', + password => 'secret', + tenant => 'servicesv3', + public_url => 'http://127.0.0.1:1234/v3', + admin_url => 'http://127.0.0.1:1234/v3', + internal_url => 'http://127.0.0.1:1234/v3', + user_domain => 'service_domain', + project_domain => 'service_domain', + } EOS @@ -77,40 +130,97 @@ class { '::keystone::endpoint': end describe cron do - it { should have_entry('1 0 * * * keystone-manage token_flush >>/var/log/keystone/keystone-tokenflush.log 2>&1').with_user('keystone') } + it { is_expected.to have_entry('1 0 * * * keystone-manage token_flush >>/var/log/keystone/keystone-tokenflush.log 2>&1').with_user('keystone') } end - describe 'test keystone user/tenant/service/role/endpoint resources' do + shared_examples_for 'keystone user/tenant/service/role/endpoint resources using v2 API' do |auth_creds| + it 'should find users in the default domain' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 user list") do |r| + expect(r.stdout).to match(/admin/) + expect(r.stderr).to be_empty + end + end + it 'should find tenants in the default domain' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 project list") do |r| + expect(r.stdout).to match(/openstack/) + expect(r.stderr).to be_empty + end + end + it 'should find beaker service' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 service list") do |r| + expect(r.stdout).to match(/beaker/) + expect(r.stderr).to be_empty + end + end + it 'should find admin role' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 role list") do |r| + expect(r.stdout).to match(/admin/) + expect(r.stderr).to be_empty + end + end + it 'should find beaker endpoints' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 endpoint list --long") do |r| + expect(r.stdout).to match(/1234/) + expect(r.stderr).to be_empty + end + end + end + shared_examples_for 'keystone user/tenant/service/role/endpoint resources using v3 API' do |auth_creds| it 'should find beaker user' do - shell('openstack --os-username admin --os-password a_big_secret --os-tenant-name openstack --os-auth-url http://127.0.0.1:5000/v2.0 user list') do |r| + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 user list") do |r| expect(r.stdout).to match(/beaker/) expect(r.stderr).to be_empty end end it 'should find services tenant' do - shell('openstack --os-username admin --os-password a_big_secret --os-tenant-name openstack --os-auth-url http://127.0.0.1:5000/v2.0 project list') do |r| + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 project list") do |r| 
expect(r.stdout).to match(/services/) expect(r.stderr).to be_empty end end it 'should find beaker service' do - shell('openstack --os-username admin --os-password a_big_secret --os-tenant-name openstack --os-auth-url http://127.0.0.1:5000/v2.0 service list') do |r| + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 service list") do |r| expect(r.stdout).to match(/beaker/) expect(r.stderr).to be_empty end end it 'should find admin role' do - shell('openstack --os-username admin --os-password a_big_secret --os-tenant-name openstack --os-auth-url http://127.0.0.1:5000/v2.0 role list') do |r| + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 role list") do |r| expect(r.stdout).to match(/admin/) expect(r.stderr).to be_empty end end it 'should find beaker endpoints' do - shell('openstack --os-username admin --os-password a_big_secret --os-tenant-name openstack --os-auth-url http://127.0.0.1:5000/v2.0 endpoint list --long') do |r| + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 endpoint list") do |r| expect(r.stdout).to match(/1234/) expect(r.stderr).to be_empty end end end + describe 'with v2 admin with v2 credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v2 API', + '--os-username admin --os-password a_big_secret --os-project-name openstack' + end + describe 'with v2 service with v2 credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v2 API', + '--os-username beaker-ci --os-password secret --os-project-name services' + end + describe 'with v2 admin with v3 credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username admin --os-password a_big_secret --os-project-name openstack --os-user-domain-name default_domain --os-project-domain-name default_domain' + end + describe "with v2 service with v3 credentials" do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username beaker-ci --os-password secret --os-project-name services --os-user-domain-name default_domain --os-project-domain-name default_domain' + end + describe 'with v3 admin with v3 credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username adminv3 --os-password a_big_secret --os-project-name openstackv3 --os-user-domain-name admin_domain --os-project-domain-name admin_domain' + end + describe "with v3 service with v3 credentials" do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username beaker-civ3 --os-password secret --os-project-name servicesv3 --os-user-domain-name service_domain --os-project-domain-name service_domain' + end + end end diff --git a/keystone/spec/acceptance/keystone_wsgi_apache_spec.rb b/keystone/spec/acceptance/keystone_wsgi_apache_spec.rb new file mode 100644 index 000000000..b7c8d9dc2 --- /dev/null +++ b/keystone/spec/acceptance/keystone_wsgi_apache_spec.rb @@ -0,0 +1,232 @@ +require 'spec_helper_acceptance' + +describe 'keystone server running with Apache/WSGI with resources' do + + context 'default parameters' do + + it 'should work with no errors' do + pp= <<-EOS + Exec { logoutput => 'on_failure' } + + # Common resources + case $::osfamily { + 'Debian': { + include ::apt + class { '::openstack_extras::repo::debian::ubuntu': + release => 'kilo', + package_require 
=> true, + } + } + 'RedHat': { + class { '::openstack_extras::repo::redhat::redhat': + release => 'kilo', + } + package { 'openstack-selinux': ensure => 'latest' } + } + default: { + fail("Unsupported osfamily (${::osfamily})") + } + } + + class { '::mysql::server': } + + # Keystone resources + class { '::keystone::client': } + class { '::keystone::cron::token_flush': } + class { '::keystone::db::mysql': + password => 'keystone', + } + class { '::keystone': + verbose => true, + debug => true, + database_connection => 'mysql://keystone:keystone@127.0.0.1/keystone', + admin_token => 'admin_token', + enabled => true, + service_name => 'httpd', + default_domain => 'default_domain', + } + include ::apache + class { '::keystone::wsgi::apache': + ssl => false, + } + + # "v2" admin and service + class { '::keystone::roles::admin': + email => 'test@example.tld', + password => 'a_big_secret', + } + class { '::keystone::endpoint': + public_url => "http://127.0.0.1:5000/", + admin_url => "http://127.0.0.1:35357/", + default_domain => 'admin', + } + ::keystone::resource::service_identity { 'beaker-ci': + service_type => 'beaker', + service_description => 'beaker service', + service_name => 'beaker', + password => 'secret', + public_url => 'http://127.0.0.1:1234', + admin_url => 'http://127.0.0.1:1234', + internal_url => 'http://127.0.0.1:1234', + } + # v3 admin + # we don't use ::keystone::roles::admin but still create resources manually: + keystone_domain { 'admin_domain': + ensure => present, + enabled => true, + description => 'Domain for admin v3 users', + } + keystone_domain { 'service_domain': + ensure => present, + enabled => true, + description => 'Domain for admin v3 users', + } + keystone_tenant { 'servicesv3': + ensure => present, + enabled => true, + description => 'Tenant for the openstack services', + domain => 'service_domain', + } + keystone_tenant { 'openstackv3': + ensure => present, + enabled => true, + description => 'admin tenant', + domain => 'admin_domain', + } + keystone_user { 'adminv3': + ensure => present, + enabled => true, + tenant => 'openstackv3', # note: don't have to use 'openstackv3::admin_domain' here since the tenant name 'openstackv3' is unique among all domains + email => 'test@example.tld', + password => 'a_big_secret', + domain => 'admin_domain', + } + keystone_user_role { 'adminv3@openstackv3': + ensure => present, + roles => ['admin'], + } + # service user exists only in the service_domain - must + # use v3 api + ::keystone::resource::service_identity { 'beaker-civ3': + service_type => 'beakerv3', + service_description => 'beakerv3 service', + service_name => 'beakerv3', + password => 'secret', + tenant => 'servicesv3', + public_url => 'http://127.0.0.1:1234/v3', + admin_url => 'http://127.0.0.1:1234/v3', + internal_url => 'http://127.0.0.1:1234/v3', + user_domain => 'service_domain', + project_domain => 'service_domain', + } + EOS + + + # Run it twice and test for idempotency + apply_manifest(pp, :catch_failures => true) + apply_manifest(pp, :catch_changes => true) + end + + describe port(5000) do + it { is_expected.to be_listening } + end + + describe port(35357) do + it { is_expected.to be_listening } + end + + describe cron do + it { is_expected.to have_entry('1 0 * * * keystone-manage token_flush >>/var/log/keystone/keystone-tokenflush.log 2>&1').with_user('keystone') } + end + + shared_examples_for 'keystone user/tenant/service/role/endpoint resources using v2 API' do |auth_creds| + it 'should find users in the default domain' do + shell("openstack 
#{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 user list") do |r| + expect(r.stdout).to match(/admin/) + expect(r.stderr).to be_empty + end + end + it 'should find tenants in the default domain' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 project list") do |r| + expect(r.stdout).to match(/openstack/) + expect(r.stderr).to be_empty + end + end + it 'should find beaker service' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 service list") do |r| + expect(r.stdout).to match(/beaker/) + expect(r.stderr).to be_empty + end + end + it 'should find admin role' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 role list") do |r| + expect(r.stdout).to match(/admin/) + expect(r.stderr).to be_empty + end + end + it 'should find beaker endpoints' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v2.0 --os-identity-api-version 2 endpoint list --long") do |r| + expect(r.stdout).to match(/1234/) + expect(r.stderr).to be_empty + end + end + end + shared_examples_for 'keystone user/tenant/service/role/endpoint resources using v3 API' do |auth_creds| + it 'should find beaker user' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 user list") do |r| + expect(r.stdout).to match(/beaker/) + expect(r.stderr).to be_empty + end + end + it 'should find services tenant' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 project list") do |r| + expect(r.stdout).to match(/services/) + expect(r.stderr).to be_empty + end + end + it 'should find beaker service' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 service list") do |r| + expect(r.stdout).to match(/beaker/) + expect(r.stderr).to be_empty + end + end + it 'should find admin role' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 role list") do |r| + expect(r.stdout).to match(/admin/) + expect(r.stderr).to be_empty + end + end + it 'should find beaker endpoints' do + shell("openstack #{auth_creds} --os-auth-url http://127.0.0.1:5000/v3 --os-identity-api-version 3 endpoint list") do |r| + expect(r.stdout).to match(/1234/) + expect(r.stderr).to be_empty + end + end + end + describe 'with v2 admin with v2 credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v2 API', + '--os-username admin --os-password a_big_secret --os-project-name openstack' + end + describe 'with v2 service with v2 credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v2 API', + '--os-username beaker-ci --os-password secret --os-project-name services' + end + describe 'with v2 admin with v3 credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username admin --os-password a_big_secret --os-project-name openstack --os-user-domain-name default_domain --os-project-domain-name default_domain' + end + describe "with v2 service with v3 credentials" do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username beaker-ci --os-password secret --os-project-name services --os-user-domain-name default_domain --os-project-domain-name default_domain' + end + describe 'with v3 admin with v3 
credentials' do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username adminv3 --os-password a_big_secret --os-project-name openstackv3 --os-user-domain-name admin_domain --os-project-domain-name admin_domain' + end + describe "with v3 service with v3 credentials" do + include_examples 'keystone user/tenant/service/role/endpoint resources using v3 API', + '--os-username beaker-civ3 --os-password secret --os-project-name servicesv3 --os-user-domain-name service_domain --os-project-domain-name service_domain' + end + + end +end diff --git a/keystone/spec/classes/keystone_cron_token_flush_spec.rb b/keystone/spec/classes/keystone_cron_token_flush_spec.rb index 88e1fd519..24ebcd8aa 100644 --- a/keystone/spec/classes/keystone_cron_token_flush_spec.rb +++ b/keystone/spec/classes/keystone_cron_token_flush_spec.rb @@ -6,62 +6,73 @@ { :osfamily => 'Debian' } end + let :params do + { :ensure => 'present', + :minute => 1, + :hour => 0, + :monthday => '*', + :month => '*', + :weekday => '*', + :maxdelay => 0, + :destination => '/var/log/keystone/keystone-tokenflush.log' } + end + describe 'with default parameters' do it 'configures a cron' do is_expected.to contain_cron('keystone-manage token_flush').with( - :ensure => 'present', - :command => 'keystone-manage token_flush >>/var/log/keystone/keystone-tokenflush.log 2>&1', + :ensure => params[:ensure], + :command => "keystone-manage token_flush >>#{params[:destination]} 2>&1", :environment => 'PATH=/bin:/usr/bin:/usr/sbin SHELL=/bin/sh', :user => 'keystone', - :minute => 1, - :hour => 0, - :monthday => '*', - :month => '*', - :weekday => '*' + :minute => params[:minute], + :hour => params[:hour], + :monthday => params[:monthday], + :month => params[:month], + :weekday => params[:weekday] ) end end describe 'when specifying a maxdelay param' do - let :params do - { + before :each do + params.merge!( :maxdelay => 600 - } + ) end it 'configures a cron with delay' do is_expected.to contain_cron('keystone-manage token_flush').with( - :ensure => 'present', - :command => 'sleep `expr ${RANDOM} \\% 600`; keystone-manage token_flush >>/var/log/keystone/keystone-tokenflush.log 2>&1', + :ensure => params[:ensure], + :command => "sleep `expr ${RANDOM} \\% #{params[:maxdelay]}`; keystone-manage token_flush >>#{params[:destination]} 2>&1", :environment => 'PATH=/bin:/usr/bin:/usr/sbin SHELL=/bin/sh', :user => 'keystone', - :minute => 1, - :hour => 0, - :monthday => '*', - :month => '*', - :weekday => '*' + :minute => params[:minute], + :hour => params[:hour], + :monthday => params[:monthday], + :month => params[:month], + :weekday => params[:weekday] ) end end - describe 'when specifying a maxdelay param' do - let :params do - { + describe 'when disabling cron job' do + before :each do + params.merge!( :ensure => 'absent' - } + ) end it 'configures a cron with delay' do is_expected.to contain_cron('keystone-manage token_flush').with( - :ensure => 'absent', - :command => 'keystone-manage token_flush >>/var/log/keystone/keystone-tokenflush.log 2>&1', + :ensure => params[:ensure], + :command => "keystone-manage token_flush >>#{params[:destination]} 2>&1", :environment => 'PATH=/bin:/usr/bin:/usr/sbin SHELL=/bin/sh', :user => 'keystone', - :minute => 1, - :hour => 0, - :monthday => '*', - :month => '*', - :weekday => '*' + :minute => params[:minute], + :hour => params[:hour], + :monthday => params[:monthday], + :month => params[:month], + :weekday => params[:weekday] ) end end diff --git 
a/keystone/spec/classes/keystone_endpoint_spec.rb b/keystone/spec/classes/keystone_endpoint_spec.rb index 217d791f9..59390ec98 100644 --- a/keystone/spec/classes/keystone_endpoint_spec.rb +++ b/keystone/spec/classes/keystone_endpoint_spec.rb @@ -48,4 +48,19 @@ ) end end + + describe 'with domain parameters' do + + let :params do + { :user_domain => 'userdomain', + :project_domain => 'projectdomain', + :default_domain => 'defaultdomain' } + end + + it { is_expected.to contain_keystone__resource__service_identity('keystone').with( + :user_domain => 'userdomain', + :project_domain => 'projectdomain', + :default_domain => 'defaultdomain' + )} + end end diff --git a/keystone/spec/classes/keystone_roles_admin_spec.rb b/keystone/spec/classes/keystone_roles_admin_spec.rb index bbd6d953b..10f2a3850 100644 --- a/keystone/spec/classes/keystone_roles_admin_spec.rb +++ b/keystone/spec/classes/keystone_roles_admin_spec.rb @@ -54,8 +54,8 @@ end it { is_expected.to contain_keystone_tenant('foobar').with( - :ensure => 'present', - :enabled => true, + :ensure => 'present', + :enabled => true, :description => 'foobar description' )} it { is_expected.to contain_keystone_tenant('admin').with( @@ -95,8 +95,8 @@ before do let :params do { - :configure_user => false, - :configure_user_role => false + :configure_user => false, + :configure_user_role => false } end @@ -105,4 +105,83 @@ end end + describe 'when specifying admin_user_domain and admin_project_domain' do + let :params do + { + :email => 'foo@bar', + :password => 'ChangeMe', + :admin_tenant => 'admin_tenant', + :admin_user_domain => 'admin_user_domain', + :admin_project_domain => 'admin_project_domain', + } + end + it { is_expected.to contain_keystone_user('admin').with( + :domain => 'admin_user_domain', + :tenant => 'admin_tenant' + )} + it { is_expected.to contain_keystone_tenant('admin_tenant').with(:domain => 'admin_project_domain') } + it { is_expected.to contain_keystone_domain('admin_user_domain') } + it { is_expected.to contain_keystone_domain('admin_project_domain') } + + end + + describe 'when specifying admin_user_domain and admin_project_domain' do + let :params do + { + :email => 'foo@bar', + :password => 'ChangeMe', + :admin_tenant => 'admin_tenant::admin_project_domain', + :admin_user_domain => 'admin_user_domain', + :admin_project_domain => 'admin_project_domain', + } + end + it { is_expected.to contain_keystone_user('admin').with( + :domain => 'admin_user_domain', + :tenant => 'admin_tenant::admin_project_domain' + )} + it { is_expected.to contain_keystone_tenant('admin_tenant::admin_project_domain').with(:domain => 'admin_project_domain') } + it { is_expected.to contain_keystone_domain('admin_user_domain') } + it { is_expected.to contain_keystone_domain('admin_project_domain') } + + end + + describe 'when specifying a service domain' do + let :params do + { + :email => 'foo@bar', + :password => 'ChangeMe', + :service_tenant => 'service_project', + :service_project_domain => 'service_domain' + } + end + it { is_expected.to contain_keystone_tenant('service_project').with(:domain => 'service_domain') } + it { is_expected.to contain_keystone_domain('service_domain') } + + end + + describe 'when specifying a service domain and service tenant domain' do + let :params do + { + :email => 'foo@bar', + :password => 'ChangeMe', + :service_tenant => 'service_project::service_domain', + :service_project_domain => 'service_domain' + } + end + it { is_expected.to contain_keystone_tenant('service_project::service_domain').with(:domain => 
'service_domain') } + it { is_expected.to contain_keystone_domain('service_domain') } + + end + + describe 'when admin_user_domain and admin_project_domain are equal' do + let :params do + { + :email => 'foo@bar', + :password => 'ChangeMe', + :admin_user_domain => 'admin_domain', + :admin_project_domain => 'admin_domain', + } + end + it { is_expected.to contain_keystone_domain('admin_domain') } + end end diff --git a/keystone/spec/classes/keystone_spec.rb b/keystone/spec/classes/keystone_spec.rb index 89c4fc55e..418f9d81f 100644 --- a/keystone/spec/classes/keystone_spec.rb +++ b/keystone/spec/classes/keystone_spec.rb @@ -20,79 +20,84 @@ end default_params = { - 'admin_token' => 'service_token', - 'package_ensure' => 'present', - 'client_package_ensure' => 'present', - 'public_bind_host' => '0.0.0.0', - 'admin_bind_host' => '0.0.0.0', - 'public_port' => '5000', - 'admin_port' => '35357', - 'admin_token' => 'service_token', - 'verbose' => false, - 'debug' => false, - 'catalog_type' => 'sql', - 'catalog_driver' => false, - 'token_provider' => 'keystone.token.providers.uuid.Provider', - 'token_driver' => 'keystone.token.persistence.backends.sql.Token', - 'revoke_driver' => 'keystone.contrib.revoke.backends.sql.Revoke', - 'cache_dir' => '/var/cache/keystone', - 'enable_ssl' => false, - 'ssl_certfile' => '/etc/keystone/ssl/certs/keystone.pem', - 'ssl_keyfile' => '/etc/keystone/ssl/private/keystonekey.pem', - 'ssl_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', - 'ssl_ca_key' => '/etc/keystone/ssl/private/cakey.pem', - 'ssl_cert_subject' => '/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', - 'enabled' => true, - 'manage_service' => true, - 'database_connection' => 'sqlite:////var/lib/keystone/keystone.db', - 'database_idle_timeout' => '200', - 'enable_pki_setup' => true, - 'signing_certfile' => '/etc/keystone/ssl/certs/signing_cert.pem', - 'signing_keyfile' => '/etc/keystone/ssl/private/signing_key.pem', - 'signing_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', - 'signing_ca_key' => '/etc/keystone/ssl/private/cakey.pem', - 'rabbit_host' => 'localhost', - 'rabbit_password' => 'guest', - 'rabbit_userid' => 'guest', - 'admin_workers' => 20, - 'public_workers' => 20, - 'sync_db' => true, + 'admin_token' => 'service_token', + 'package_ensure' => 'present', + 'client_package_ensure' => 'present', + 'public_bind_host' => '0.0.0.0', + 'admin_bind_host' => '0.0.0.0', + 'public_port' => '5000', + 'admin_port' => '35357', + 'admin_token' => 'service_token', + 'verbose' => false, + 'debug' => false, + 'catalog_type' => 'sql', + 'catalog_driver' => false, + 'token_provider' => 'keystone.token.providers.uuid.Provider', + 'token_driver' => 'keystone.token.persistence.backends.sql.Token', + 'revoke_driver' => 'keystone.contrib.revoke.backends.sql.Revoke', + 'cache_dir' => '/var/cache/keystone', + 'enable_ssl' => false, + 'ssl_certfile' => '/etc/keystone/ssl/certs/keystone.pem', + 'ssl_keyfile' => '/etc/keystone/ssl/private/keystonekey.pem', + 'ssl_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', + 'ssl_ca_key' => '/etc/keystone/ssl/private/cakey.pem', + 'ssl_cert_subject' => '/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', + 'enabled' => true, + 'manage_service' => true, + 'database_connection' => 'sqlite:////var/lib/keystone/keystone.db', + 'database_idle_timeout' => '200', + 'enable_pki_setup' => true, + 'signing_certfile' => '/etc/keystone/ssl/certs/signing_cert.pem', + 'signing_keyfile' => '/etc/keystone/ssl/private/signing_key.pem', + 'signing_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', + 'signing_ca_key' => 
'/etc/keystone/ssl/private/cakey.pem', + 'rabbit_host' => 'localhost', + 'rabbit_password' => 'guest', + 'rabbit_userid' => 'guest', + 'rabbit_heartbeat_timeout_threshold' => 0, + 'rabbit_heartbeat_rate' => 2, + 'admin_workers' => 20, + 'public_workers' => 20, + 'sync_db' => true, } override_params = { - 'package_ensure' => 'latest', - 'client_package_ensure' => 'latest', - 'public_bind_host' => '0.0.0.0', - 'admin_bind_host' => '0.0.0.0', - 'public_port' => '5001', - 'admin_port' => '35358', - 'admin_token' => 'service_token_override', - 'verbose' => true, - 'debug' => true, - 'catalog_type' => 'template', - 'token_provider' => 'keystone.token.providers.uuid.Provider', - 'token_driver' => 'keystone.token.backends.kvs.Token', - 'revoke_driver' => 'keystone.contrib.revoke.backends.kvs.Revoke', - 'public_endpoint' => 'https://localhost:5000/v2.0/', - 'admin_endpoint' => 'https://localhost:35357/v2.0/', - 'enable_ssl' => true, - 'ssl_certfile' => '/etc/keystone/ssl/certs/keystone.pem', - 'ssl_keyfile' => '/etc/keystone/ssl/private/keystonekey.pem', - 'ssl_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', - 'ssl_ca_key' => '/etc/keystone/ssl/private/cakey.pem', - 'ssl_cert_subject' => '/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', - 'enabled' => false, - 'manage_service' => true, - 'database_connection' => 'mysql://a:b@c/d', - 'database_idle_timeout' => '300', - 'enable_pki_setup' => true, - 'signing_certfile' => '/etc/keystone/ssl/certs/signing_cert.pem', - 'signing_keyfile' => '/etc/keystone/ssl/private/signing_key.pem', - 'signing_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', - 'signing_ca_key' => '/etc/keystone/ssl/private/cakey.pem', - 'rabbit_host' => '127.0.0.1', - 'rabbit_password' => 'openstack', - 'rabbit_userid' => 'admin', + 'package_ensure' => 'latest', + 'client_package_ensure' => 'latest', + 'public_bind_host' => '0.0.0.0', + 'admin_bind_host' => '0.0.0.0', + 'public_port' => '5001', + 'admin_port' => '35358', + 'admin_token' => 'service_token_override', + 'verbose' => true, + 'debug' => true, + 'catalog_type' => 'template', + 'token_provider' => 'keystone.token.providers.uuid.Provider', + 'token_driver' => 'keystone.token.backends.kvs.Token', + 'revoke_driver' => 'keystone.contrib.revoke.backends.kvs.Revoke', + 'public_endpoint' => 'https://localhost:5000/v2.0/', + 'admin_endpoint' => 'https://localhost:35357/v2.0/', + 'enable_ssl' => true, + 'ssl_certfile' => '/etc/keystone/ssl/certs/keystone.pem', + 'ssl_keyfile' => '/etc/keystone/ssl/private/keystonekey.pem', + 'ssl_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', + 'ssl_ca_key' => '/etc/keystone/ssl/private/cakey.pem', + 'ssl_cert_subject' => '/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost', + 'enabled' => false, + 'manage_service' => true, + 'database_connection' => 'mysql://a:b@c/d', + 'database_idle_timeout' => '300', + 'enable_pki_setup' => true, + 'signing_certfile' => '/etc/keystone/ssl/certs/signing_cert.pem', + 'signing_keyfile' => '/etc/keystone/ssl/private/signing_key.pem', + 'signing_ca_certs' => '/etc/keystone/ssl/certs/ca.pem', + 'signing_ca_key' => '/etc/keystone/ssl/private/cakey.pem', + 'rabbit_host' => '127.0.0.1', + 'rabbit_password' => 'openstack', + 'rabbit_userid' => 'admin', + 'rabbit_heartbeat_timeout_threshold' => '60', + 'rabbit_heartbeat_rate' => '10', + 'default_domain' => 'other_domain', } httpd_params = {'service_name' => 'httpd'}.merge(default_params) @@ -175,7 +180,7 @@ end it 'should contain correct revoke driver' do - should 
contain_keystone_config('revoke/driver').with_value(param_hash['revoke_driver']) + is_expected.to contain_keystone_config('revoke/driver').with_value(param_hash['revoke_driver']) end it 'should ensure proper setting of admin_endpoint and public_endpoint' do @@ -192,7 +197,12 @@ end it 'should contain correct rabbit_password' do - is_expected.to contain_keystone_config('DEFAULT/rabbit_password').with_value(param_hash['rabbit_password']).with_secret(true) + is_expected.to contain_keystone_config('oslo_messaging_rabbit/rabbit_password').with_value(param_hash['rabbit_password']).with_secret(true) + end + + it 'should contain correct rabbit heartbeat configuration' do + is_expected.to contain_keystone_config('oslo_messaging_rabbit/heartbeat_timeout_threshold').with_value(param_hash['rabbit_heartbeat_timeout_threshold']) + is_expected.to contain_keystone_config('oslo_messaging_rabbit/heartbeat_rate').with_value(param_hash['rabbit_heartbeat_rate']) end it 'should remove max_token_size param by default' do @@ -211,6 +221,11 @@ is_expected.to contain_keystone_config('DEFAULT/public_workers').with_value('2') end end + + if param_hash['default_domain'] + it { is_expected.to contain_keystone_domain(param_hash['default_domain']).with(:is_default => true) } + it { is_expected.to contain_anchor('default_domain_created') } + end end [default_params, override_params].each do |param_hash| @@ -229,6 +244,8 @@ 'hasrestart' => true ) } + it { is_expected.to contain_anchor('keystone_started') } + end end @@ -245,11 +262,11 @@ it do expect { - should contain_service(platform_parameters[:service_name]).with('ensure' => 'running') + is_expected.to contain_service(platform_parameters[:service_name]).with('ensure' => 'running') }.to raise_error(RSpec::Expectations::ExpectationNotMetError, /expected that the catalogue would contain Service\[#{platform_parameters[:service_name]}\]/) end - it { should contain_class('keystone::service').with( + it { is_expected.to contain_class('keystone::service').with( 'ensure' => 'stopped', 'service_name' => platform_parameters[:service_name], 'enable' => false, @@ -276,6 +293,7 @@ 'hasstatus' => true, 'hasrestart' => true ) } + it { is_expected.to contain_anchor('keystone_started') } end describe 'when configuring signing token provider' do @@ -629,11 +647,11 @@ end it do - is_expected.to contain_keystone_config('DEFAULT/rabbit_use_ssl').with_value('true') - is_expected.to contain_keystone_config('DEFAULT/kombu_ssl_ca_certs').with_value('/path/to/ssl/ca/certs') - is_expected.to contain_keystone_config('DEFAULT/kombu_ssl_certfile').with_value('/path/to/ssl/cert/file') - is_expected.to contain_keystone_config('DEFAULT/kombu_ssl_keyfile').with_value('/path/to/ssl/keyfile') - is_expected.to contain_keystone_config('DEFAULT/kombu_ssl_version').with_value('TLSv1') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('true') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_value('/path/to/ssl/ca/certs') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_value('/path/to/ssl/cert/file') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_value('/path/to/ssl/keyfile') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_version').with_value('TLSv1') end end @@ -649,11 +667,11 @@ end it do - is_expected.to contain_keystone_config('DEFAULT/rabbit_use_ssl').with_value('false') - is_expected.to 
contain_keystone_config('DEFAULT/kombu_ssl_ca_certs').with_ensure('absent') - is_expected.to contain_keystone_config('DEFAULT/kombu_ssl_certfile').with_ensure('absent') - is_expected.to contain_keystone_config('DEFAULT/kombu_ssl_keyfile').with_ensure('absent') - is_expected.to contain_keystone_config('DEFAULT/kombu_ssl_version').with_ensure('absent') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/rabbit_use_ssl').with_value('false') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_ca_certs').with_ensure('absent') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_certfile').with_ensure('absent') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_keyfile').with_ensure('absent') + is_expected.to contain_keystone_config('oslo_messaging_rabbit/kombu_ssl_version').with_ensure('absent') end end @@ -845,6 +863,51 @@ end end + shared_examples_for "when configuring default domain" do + describe 'with default config' do + let :params do + default_params + end + it { is_expected.to_not contain_exec('restart_keystone') } + end + describe 'with default domain and eventlet service is managed and enabled' do + let :params do + default_params.merge({ + 'default_domain'=> 'test', + }) + end + it { is_expected.to contain_exec('restart_keystone').with( + 'command' => "service #{platform_parameters[:service_name]} restart", + ) } + it { is_expected.to contain_anchor('default_domain_created') } + end + describe 'with default domain and wsgi service is managed and enabled' do + let :pre_condition do + 'include ::apache' + end + let :params do + default_params.merge({ + 'default_domain'=> 'test', + 'service_name' => 'httpd', + }) + end + it { is_expected.to contain_exec('restart_keystone').with( + 'command' => "service #{platform_parameters[:httpd_service_name]} restart", + ) } + it { is_expected.to contain_anchor('default_domain_created') } + end + describe 'with default domain and service is not managed' do + let :params do + default_params.merge({ + 'default_domain' => 'test', + 'manage_service' => false, + }) + end + it { is_expected.to_not contain_exec('restart_keystone') } + it { is_expected.to contain_anchor('default_domain_created') } + end + end + context 'on RedHat platforms' do let :facts do global_facts.merge({ @@ -855,11 +918,13 @@ let :platform_parameters do { - :service_name => 'openstack-keystone' + :service_name => 'openstack-keystone', + :httpd_service_name => 'httpd', } end it_configures 'when using default class parameters for httpd' + it_configures 'when configuring default domain' end context 'on Debian platforms' do @@ -873,10 +938,12 @@ let :platform_parameters do { - :service_name => 'keystone' + :service_name => 'keystone', + :httpd_service_name => 'apache2', } end it_configures 'when using default class parameters for httpd' + it_configures 'when configuring default domain' end end diff --git a/keystone/spec/defines/keystone_resource_authtoken_spec.rb b/keystone/spec/defines/keystone_resource_authtoken_spec.rb new file mode 100644 index 000000000..06894070b --- /dev/null +++ b/keystone/spec/defines/keystone_resource_authtoken_spec.rb @@ -0,0 +1,198 @@ +require 'spec_helper' + +describe 'keystone::resource::authtoken' do + + let (:title) { 'keystone_config' } + + let :required_params do + { :username => 'keystone', + :password => 'secret', + :auth_url => 'http://127.0.0.1:35357/', + :project_name => 'services' } + end + + shared_examples 'shared examples' do + + context 'with only required 
parameters' do + let :params do + required_params + end + + it { is_expected.to contain_keystone_config('keystone_authtoken/username').with( + :value => 'keystone', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/user_id').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/password').with( + :value => 'secret', + :secret => true, + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/auth_plugin').with( + :value => 'password', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/auth_url').with( + :value => 'http://127.0.0.1:35357/', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/project_name').with( + :value => 'services', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/project_id').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/user_domain_name').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/project_domain_name').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/user_domain_id').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/project_domain_id').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/domain_name').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/domain_id').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/trust_id').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/cacert').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/cert').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/key').with( + :ensure => 'absent', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/insecure').with( + :value => 'false', + )} + + end + + context 'when omitting a required parameter password' do + let :params do + required_params.delete(:password) + end + it { expect { is_expected.to raise_error(Puppet::Error) } } + end + + context 'when specifying auth_url' do + let :params do + required_params.merge({:auth_url => 'https://host:11111/v3/'}) + end + it { is_expected.to contain_keystone_config('keystone_authtoken/auth_url').with( + :value => 'https://host:11111/v3/', + )} + + end + + context 'when specifying project and scope_domain' do + let :params do + required_params.merge({:domain_name => 'domain'}) + end + it { expect { is_expected.to raise_error(Puppet::Error, 'Cannot specify both a project (project_name or project_id) and a domain (domain_name or domain_id)') } } + end + + context 'when specifying neither project nor domain' do + let :params do + required_params.delete(:project_name) + end + it { expect { is_expected.to raise_error(Puppet::Error, 'Must specify either a project (project_name or project_id, for a project scoped token) or a domain (domain_name or domain_id, for a domain scoped token)') } } + end + + context 'when specifying domain in name' do + let :params do + required_params.merge({ + :username => 'keystone::userdomain', + :project_name => 'services::projdomain', + :default_domain_name => 'shouldnotuse' + }) + end + it { is_expected.to 
contain_keystone_config('keystone_authtoken/user_domain_name').with( + :value => 'userdomain', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/project_domain_name').with( + :value => 'projdomain', + )} + + end + + context 'when specifying domain in parameters' do + let :params do + required_params.merge({ + :username => 'keystone::userdomain', + :user_domain_name => 'realuserdomain', + :project_name => 'services::projdomain', + :project_domain_name => 'realprojectdomain', + :default_domain_name => 'shouldnotuse' + }) + end + it { is_expected.to contain_keystone_config('keystone_authtoken/user_domain_name').with( + :value => 'realuserdomain', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/project_domain_name').with( + :value => 'realprojectdomain', + )} + + end + + context 'when specifying only default domain' do + let :params do + required_params.merge({ + :default_domain_name => 'defaultdomain' + }) + end + it { is_expected.to contain_keystone_config('keystone_authtoken/user_domain_name').with( + :value => 'defaultdomain', + )} + + it { is_expected.to contain_keystone_config('keystone_authtoken/project_domain_name').with( + :value => 'defaultdomain', + )} + + end + + end + + context 'on a Debian osfamily' do + let :facts do + { :osfamily => "Debian" } + end + + include_examples 'shared examples' + end + + context 'on a RedHat osfamily' do + let :facts do + { :osfamily => 'RedHat' } + end + + include_examples 'shared examples' + end +end diff --git a/keystone/spec/defines/keystone_resource_service_identity_spec.rb b/keystone/spec/defines/keystone_resource_service_identity_spec.rb index 18979635e..63ef98ad9 100644 --- a/keystone/spec/defines/keystone_resource_service_identity_spec.rb +++ b/keystone/spec/defines/keystone_resource_service_identity_spec.rb @@ -69,6 +69,69 @@ it { expect { is_expected.to raise_error(Puppet::Error) } } end + context 'with user domain' do + let :params do + required_params.merge({:user_domain => 'userdomain'}) + end + it { is_expected.to contain_keystone_domain('userdomain').with( + :ensure => 'present', + )} + it { is_expected.to contain_keystone_user(title).with( + :ensure => 'present', + :password => 'secrete', + :email => 'neutron@localhost', + :tenant => 'services', + :domain => 'userdomain', + )} + it { is_expected.to contain_keystone_user_role("#{title}@services").with( + :ensure => 'present', + :roles => ['admin'], + )} + end + context 'with user and project domain' do + let :params do + required_params.merge({ + :user_domain => 'userdomain', + :project_domain => 'projdomain', + }) + end + it { is_expected.to contain_keystone_user(title).with( + :ensure => 'present', + :password => 'secrete', + :email => 'neutron@localhost', + :tenant => 'services', + :domain => 'userdomain', + )} + it { is_expected.to contain_keystone_domain('userdomain').with( + :ensure => 'present', + )} + it { is_expected.to contain_keystone_user_role("#{title}@services").with( + :ensure => 'present', + :roles => ['admin'], + )} + end + context 'with default domain only' do + let :params do + required_params.merge({ + :default_domain => 'defaultdomain', + }) + end + it { is_expected.to contain_keystone_user(title).with( + :ensure => 'present', + :password => 'secrete', + :email => 'neutron@localhost', + :tenant => 'services', + :domain => 'defaultdomain', + )} + it { is_expected.to contain_keystone_domain('defaultdomain').with( + :ensure => 'present', + )} + it { is_expected.to contain_keystone_user_role("#{title}@services").with( + 
:ensure => 'present', + :roles => ['admin'], + )} + end + end context 'on a Debian osfamily' do diff --git a/keystone/spec/spec_helper.rb b/keystone/spec/spec_helper.rb index 78594f8ae..fb2fcd0c0 100644 --- a/keystone/spec/spec_helper.rb +++ b/keystone/spec/spec_helper.rb @@ -7,3 +7,16 @@ c.alias_it_should_behave_like_to :it_configures, 'configures' c.alias_it_should_behave_like_to :it_raises, 'raises' end + +def setup_provider_tests + Puppet::Provider::Keystone.class_exec do + def self.reset + @admin_endpoint = nil + @tenant_hash = nil + @admin_token = nil + @keystone_file = nil + Puppet::Provider::Keystone.default_domain_id = nil + @domain_hash = nil + end + end +end diff --git a/keystone/spec/spec_helper_acceptance.rb b/keystone/spec/spec_helper_acceptance.rb index 429e807c4..144b31e3f 100644 --- a/keystone/spec/spec_helper_acceptance.rb +++ b/keystone/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/keystone/spec/unit/provider/keystone_domain/openstack_spec.rb b/keystone/spec/unit/provider/keystone_domain/openstack_spec.rb index 497f09bae..c57373202 100644 --- a/keystone/spec/unit/provider/keystone_domain/openstack_spec.rb +++ b/keystone/spec/unit/provider/keystone_domain/openstack_spec.rb @@ -2,26 +2,12 @@ require 'spec_helper' require 'puppet/provider/keystone_domain/openstack' -provider_class = Puppet::Type.type(:keystone_domain).provider(:openstack) +setup_provider_tests -class Puppet::Provider::Keystone - def self.reset - @admin_endpoint = nil - @tenant_hash = nil - @admin_token = nil - @keystone_file = nil - @domain_id_to_name = nil - @default_domain_id = nil - @domain_hash = nil - end -end +provider_class = Puppet::Type.type(:keystone_domain).provider(:openstack) describe provider_class do - after :each do - provider_class.reset - end - shared_examples 'authenticated with environment variables' do ENV['OS_USERNAME'] = 'test' ENV['OS_PASSWORD'] = 'abc123' @@ -48,6 +34,18 @@ def self.reset provider_class.new(resource) end + let(:another_class) do + class AnotherKlass < Puppet::Provider::Keystone + @credentials = Puppet::Provider::Openstack::CredentialsV3.new + end + AnotherKlass + end + + after :each do + provider_class.reset + another_class.reset + end + it_behaves_like 'authenticated with environment variables' do describe '#create' do it 'creates a domain' do @@ -115,12 +113,12 @@ def self.reset end it 'creates a default domain' do - File.expects(:exists?).returns(true) + File.expects(:exists?).twice.returns(true) mock = { 'identity' => {'default_domain_id' => ' default'} } - Puppet::Util::IniConfig::File.expects(:new).returns(mock) - mock.expects(:read).with('/etc/keystone/keystone.conf') + Puppet::Util::IniConfig::File.expects(:new).twice.returns(mock) + mock.expects(:read).twice.with('/etc/keystone/keystone.conf') mock.expects(:store) provider.class.expects(:openstack) .with('domain', 'create', '--format', 'shell', ['foo', '--enable', '--description', 'foo']) @@ -129,9 +127,13 @@ def self.reset description="foo" enabled=True ') + expect(provider.class.default_domain_id).to eq('default') + expect(another_class.default_domain_id).to eq('default') provider.create expect(provider.exists?).to be_truthy 
expect(mock['identity']['default_domain_id']).to eq('1cb05cfed7c24279be884ba4f6520262') + expect(provider.class.default_domain_id).to eq('1cb05cfed7c24279be884ba4f6520262') + expect(another_class.default_domain_id).to eq('1cb05cfed7c24279be884ba4f6520262') end end @@ -154,6 +156,8 @@ def self.reset provider.destroy expect(provider.exists?).to be_falsey expect(kcmock['identity']['default_domain_id']).to eq('default') + expect(provider.class.default_domain_id).to eq('default') + expect(another_class.default_domain_id).to eq('default') end end diff --git a/keystone/spec/unit/provider/keystone_paste_ini/ini_setting_spec.rb b/keystone/spec/unit/provider/keystone_paste_ini/ini_setting_spec.rb index 2eff5d63f..56379487f 100644 --- a/keystone/spec/unit/provider/keystone_paste_ini/ini_setting_spec.rb +++ b/keystone/spec/unit/provider/keystone_paste_ini/ini_setting_spec.rb @@ -23,7 +23,7 @@ {:name => 'dude/foo', :value => 'bar'} ) provider = provider_class.new(resource) - provider.section.should == 'dude' - provider.setting.should == 'foo' + expect(provider.section).to eq('dude') + expect(provider.setting).to eq('foo') end end diff --git a/keystone/spec/unit/provider/keystone_spec.rb b/keystone/spec/unit/provider/keystone_spec.rb index 44d265f52..e1035540f 100644 --- a/keystone/spec/unit/provider/keystone_spec.rb +++ b/keystone/spec/unit/provider/keystone_spec.rb @@ -3,122 +3,200 @@ require 'puppet/provider/keystone' require 'tempfile' +setup_provider_tests + klass = Puppet::Provider::Keystone class Puppet::Provider::Keystone @credentials = Puppet::Provider::Openstack::CredentialsV3.new - - def self.reset - @admin_endpoint = nil - @tenant_hash = nil - @admin_token = nil - @keystone_file = nil - @domain_id_to_name = nil - @default_domain_id = nil - @domain_hash = nil - end end describe Puppet::Provider::Keystone do + let(:another_class) do + class AnotherKlass < Puppet::Provider::Keystone + @credentials = Puppet::Provider::Openstack::CredentialsV3.new + end + AnotherKlass + end + after :each do klass.reset + another_class.reset end - describe 'when retrieving the security token' do - it 'should return nothing if there is no keystone config file' do + describe '#ssl?' 
do + it 'should be false if there is no keystone file' do File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(false) - expect(klass.get_admin_token).to be_nil + expect(klass.ssl?).to be_falsey end - it 'should return nothing if the keystone config file does not have a DEFAULT section' do + it 'should be false if ssl is not configured in keystone file' do mock = {} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_token).to be_nil + expect(klass.ssl?).to be_falsey end - it 'should fail if the keystone config file does not contain an admin token' do - mock = {'DEFAULT' => {'not_a_token' => 'foo'}} + it 'should be false if ssl is configured and disabled in keystone file' do + mock = {'ssl' => {'enable' => 'False'}} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_token).to be_nil + expect(klass.ssl?).to be_falsey end - it 'should parse the admin token if it is in the config file' do - mock = {'DEFAULT' => {'admin_token' => 'foo'}} + it 'should be true if ssl is configured and enabled in keystone file' do + mock = {'ssl' => {'enable' => 'True'}} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_token).to eq('foo') + expect(klass.ssl?).to be_truthy + end + end + + describe '#get_admin_endpoint' do + it 'should return nothing if there is no keystone config file' do + expect(klass.get_admin_endpoint).to be_nil + end + + it 'should use the admin_endpoint from keystone config file with no trailing slash' do + mock = {'DEFAULT' => {'admin_endpoint' => 'https://keystone.example.com/'}} + File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) + Puppet::Util::IniConfig::File.expects(:new).returns(mock) + mock.expects(:read).with('/etc/keystone/keystone.conf') + expect(klass.get_admin_endpoint).to eq('https://keystone.example.com') end it 'should use the specified bind_host in the admin endpoint' do - mock = {'DEFAULT' => {'admin_bind_host' => '192.168.56.210', 'admin_port' => '35357' }} + mock = {'DEFAULT' => {'admin_bind_host' => '192.168.56.210', 'admin_port' => '5001' }} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('http://192.168.56.210:35357/v3/') + expect(klass.get_admin_endpoint).to eq('http://192.168.56.210:5001') end it 'should use localhost in the admin endpoint if bind_host is 0.0.0.0' do - mock = {'DEFAULT' => { 'admin_bind_host' => '0.0.0.0', 'admin_port' => '35357' }} + mock = {'DEFAULT' => { 'admin_bind_host' => '0.0.0.0', 'admin_port' => '5001' }} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('http://127.0.0.1:35357/v3/') + expect(klass.get_admin_endpoint).to eq('http://127.0.0.1:5001') end it 'should use [::1] in the admin endpoint if bind_host is ::0' do - mock = {'DEFAULT' => { 'admin_bind_host' => '::0', 
'admin_port' => '35357' }} + mock = {'DEFAULT' => { 'admin_bind_host' => '::0', 'admin_port' => '5001' }} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('http://[::1]:35357/v3/') + expect(klass.get_admin_endpoint).to eq('http://[::1]:5001') end it 'should use localhost in the admin endpoint if bind_host is unspecified' do - mock = {'DEFAULT' => { 'admin_port' => '35357' }} + mock = {'DEFAULT' => { 'admin_port' => '5001' }} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('http://127.0.0.1:35357/v3/') + expect(klass.get_admin_endpoint).to eq('http://127.0.0.1:5001') end it 'should use https if ssl is enabled' do - mock = {'DEFAULT' => {'admin_bind_host' => '192.168.56.210', 'admin_port' => '35357' }, 'ssl' => {'enable' => 'True'}} + mock = {'DEFAULT' => {'admin_bind_host' => '192.168.56.210', 'admin_port' => '5001' }, 'ssl' => {'enable' => 'True'}} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('https://192.168.56.210:35357/v3/') + expect(klass.get_admin_endpoint).to eq('https://192.168.56.210:5001') end it 'should use http if ssl is disabled' do - mock = {'DEFAULT' => {'admin_bind_host' => '192.168.56.210', 'admin_port' => '35357' }, 'ssl' => {'enable' => 'False'}} + mock = {'DEFAULT' => {'admin_bind_host' => '192.168.56.210', 'admin_port' => '5001' }, 'ssl' => {'enable' => 'False'}} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('http://192.168.56.210:35357/v3/') + expect(klass.get_admin_endpoint).to eq('http://192.168.56.210:5001') end + end - it 'should use the defined admin_endpoint if available' do - mock = {'DEFAULT' => {'admin_endpoint' => 'https://keystone.example.com' }, 'ssl' => {'enable' => 'False'}} + describe '#get_auth_url' do + it 'should return nothing when OS_AUTH_URL is not defined in either the environment or the openrc file and there is no keystone configuration file' do + home = ENV['HOME'] + ENV.clear + File.expects(:exists?).with("#{home}/openrc").returns(false) + File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(false) + expect(klass.get_auth_url).to be_nil + end + + it 'should return the OS_AUTH_URL from the environment' do + ENV.clear + ENV['OS_AUTH_URL'] = 'http://127.0.0.1:5001' + expect(klass.get_auth_url).to eq('http://127.0.0.1:5001') + end + + it 'should return the OS_AUTH_URL from the openrc file when there is no OS_AUTH_URL in the environment' do + home = ENV['HOME'] + ENV.clear + mock = {'OS_AUTH_URL' => 'http://127.0.0.1:5001'} + klass.expects(:get_os_vars_from_rcfile).with("#{home}/openrc").returns(mock) + expect(klass.get_auth_url).to eq('http://127.0.0.1:5001') + end + + it 'should use admin_endpoint when nothing else is available' do + ENV.clear + mock = 'http://127.0.0.1:5001' + klass.expects(:admin_endpoint).returns(mock) + expect(klass.get_auth_url).to eq('http://127.0.0.1:5001') + end + end + + describe '#get_service_url 
when retrieving the security token' do + it 'should return nothing when OS_URL is not defined in environment' do + ENV.clear + expect(klass.get_service_url).to be_nil + end + + it 'should return the OS_URL from the environment' do + ENV['OS_URL'] = 'http://127.0.0.1:5001/v3' + expect(klass.get_service_url).to eq('http://127.0.0.1:5001/v3') + end + + it 'should use admin_endpoint with the API version number' do + ENV.clear + mock = 'http://127.0.0.1:5001' + klass.expects(:admin_endpoint).twice.returns(mock) + expect(klass.get_service_url).to eq('http://127.0.0.1:5001/v3') + end + end + + describe 'when retrieving the security token' do + it 'should return nothing if there is no keystone config file' do + File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(false) + expect(klass.get_admin_token).to be_nil + end + + it 'should return nothing if the keystone config file does not have a DEFAULT section' do + mock = {} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('https://keystone.example.com/v3/') + expect(klass.get_admin_token).to be_nil end - it 'should handle an admin_endpoint with a trailing slash' do - mock = {'DEFAULT' => {'admin_endpoint' => 'https://keystone.example.com/' }, 'ssl' => {'enable' => 'False'}} + it 'should fail if the keystone config file does not contain an admin token' do + mock = {'DEFAULT' => {'not_a_token' => 'foo'}} File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) Puppet::Util::IniConfig::File.expects(:new).returns(mock) mock.expects(:read).with('/etc/keystone/keystone.conf') - expect(klass.get_admin_endpoint).to eq('https://keystone.example.com/v3/') + expect(klass.get_admin_token).to be_nil end + it 'should parse the admin token if it is in the config file' do + mock = {'DEFAULT' => {'admin_token' => 'foo'}} + File.expects(:exists?).with("/etc/keystone/keystone.conf").returns(true) + Puppet::Util::IniConfig::File.expects(:new).returns(mock) + mock.expects(:read).with('/etc/keystone/keystone.conf') + expect(klass.get_admin_token).to eq('foo') + end end describe 'when using domains' do @@ -153,6 +231,29 @@ def self.reset ') expect(klass.name_and_domain('foo')).to eq(['foo', 'SomeName']) end + it 'should return the default_domain_id from one class set in another class' do + ENV['OS_USERNAME'] = 'test' + ENV['OS_PASSWORD'] = 'abc123' + ENV['OS_PROJECT_NAME'] = 'test' + ENV['OS_AUTH_URL'] = 'http://127.0.0.1:35357/v3' + klass.expects(:openstack) + .with('domain', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name","Enabled","Description" +"default","Default",True,"default domain" +"somename","SomeName",True,"some domain" +') + another_class.expects(:openstack) + .with('domain', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name","Enabled","Description" +"default","Default",True,"default domain" +"somename","SomeName",True,"some domain" +') + expect(klass.default_domain).to eq('Default') + expect(another_class.default_domain).to eq('Default') + klass.default_domain_id = 'somename' + expect(klass.default_domain).to eq('SomeName') + expect(another_class.default_domain).to eq('SomeName') + end it 'should return Default if default_domain_id is not configured' do ENV['OS_USERNAME'] = 'test' ENV['OS_PASSWORD'] = 'abc123' @@ -169,5 +270,54 @@ def self.reset ') expect(klass.name_and_domain('foo')).to eq(['foo', 'Default']) end + it 
'should list all domains when requesting a domain name from an ID' do + ENV['OS_USERNAME'] = 'test' + ENV['OS_PASSWORD'] = 'abc123' + ENV['OS_PROJECT_NAME'] = 'test' + ENV['OS_AUTH_URL'] = 'http://127.0.0.1:35357/v3' + klass.expects(:openstack) + .with('domain', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name","Enabled","Description" +"somename","SomeName",True,"default domain" +') + expect(klass.domain_name_from_id('somename')).to eq('SomeName') + end + it 'should lookup a domain when not found in the hash' do + ENV['OS_USERNAME'] = 'test' + ENV['OS_PASSWORD'] = 'abc123' + ENV['OS_PROJECT_NAME'] = 'test' + ENV['OS_AUTH_URL'] = 'http://127.0.0.1:35357/v3' + klass.expects(:openstack) + .with('domain', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name","Enabled","Description" +"somename","SomeName",True,"default domain" +') + klass.expects(:openstack) + .with('domain', 'show', '--format', 'shell', 'another') + .returns(' +name="AnOther" +id="another" +') + expect(klass.domain_name_from_id('somename')).to eq('SomeName') + expect(klass.domain_name_from_id('another')).to eq('AnOther') + end + it 'should print an error when there is no such domain' do + ENV['OS_USERNAME'] = 'test' + ENV['OS_PASSWORD'] = 'abc123' + ENV['OS_PROJECT_NAME'] = 'test' + ENV['OS_AUTH_URL'] = 'http://127.0.0.1:35357/v3' + klass.expects(:openstack) + .with('domain', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name","Enabled","Description" +"somename","SomeName",True,"default domain" +') + klass.expects(:openstack) + .with('domain', 'show', '--format', 'shell', 'doesnotexist') + .returns(' +') + klass.expects(:err) + .with('Could not find domain with id [doesnotexist]') + expect(klass.domain_name_from_id('doesnotexist')).to eq(nil) + end end end diff --git a/keystone/spec/unit/provider/keystone_tenant/openstack_spec.rb b/keystone/spec/unit/provider/keystone_tenant/openstack_spec.rb index fd17e4250..8cfcf1b24 100644 --- a/keystone/spec/unit/provider/keystone_tenant/openstack_spec.rb +++ b/keystone/spec/unit/provider/keystone_tenant/openstack_spec.rb @@ -2,19 +2,9 @@ require 'spec_helper' require 'puppet/provider/keystone_tenant/openstack' -provider_class = Puppet::Type.type(:keystone_tenant).provider(:openstack) +setup_provider_tests -class Puppet::Provider::Keystone - def self.reset - @admin_endpoint = nil - @tenant_hash = nil - @admin_token = nil - @keystone_file = nil - @domain_id_to_name = nil - @default_domain_id = nil - @domain_hash = nil - end -end +provider_class = Puppet::Type.type(:keystone_tenant).provider(:openstack) describe provider_class do @@ -203,6 +193,7 @@ def before_hook(domainlist) provider.create expect(provider.exists?).to be_truthy expect(provider.id).to eq("project-id") + expect(provider.name).to eq('foo::foo_domain') end end @@ -230,6 +221,7 @@ def before_hook(domainlist) provider.create expect(provider.exists?).to be_truthy expect(provider.id).to eq("project-id") + expect(provider.name).to eq('foo::bar_domain') end end end diff --git a/keystone/spec/unit/provider/keystone_user/openstack_spec.rb b/keystone/spec/unit/provider/keystone_user/openstack_spec.rb index 3f545d0cf..f3836ef1d 100644 --- a/keystone/spec/unit/provider/keystone_user/openstack_spec.rb +++ b/keystone/spec/unit/provider/keystone_user/openstack_spec.rb @@ -1,9 +1,16 @@ require 'puppet' require 'spec_helper' require 'puppet/provider/keystone_user/openstack' +require 'puppet/provider/openstack' + +setup_provider_tests provider_class = 
Puppet::Type.type(:keystone_user).provider(:openstack) +def project_class + Puppet::Type.type(:keystone_tenant).provider(:openstack) +end + describe provider_class do shared_examples 'authenticated with environment variables' do @@ -13,6 +20,11 @@ ENV['OS_AUTH_URL'] = 'http://127.0.0.1:5000' end + after :each do + provider_class.reset + project_class.reset + end + let(:user_attrs) do { :name => 'foo', @@ -21,6 +33,7 @@ :password => 'foo', :tenant => 'foo', :email => 'foo@example.com', + :domain => 'foo_domain', } end @@ -32,22 +45,110 @@ provider_class.new(resource) end + def before_hook(delete, missing, noproject, user_cached, project_only) + unless noproject + project_class.expects(:openstack).once + .with('domain', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name","Enabled","Description" +"default","Default",True,"default" +"foo_domain_id","foo_domain",True,"foo domain" +"bar_domain_id","bar_domain",True,"bar domain" +"another_domain_id","another_domain",True,"another domain" +"disabled_domain_id","disabled_domain",False,"disabled domain" +') + end + + if project_only + return + end + + provider.class.expects(:openstack).once + .with('domain', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name","Enabled","Description" +"default","Default",True,"default" +"foo_domain_id","foo_domain",True,"foo domain" +"bar_domain_id","bar_domain",True,"bar domain" +"another_domain_id","another_domain",True,"another domain" +"disabled_domain_id","disabled_domain",False,"disabled domain" +') + if user_cached + return # using cached user, so no user list + end + if noproject + project = '' + else + project = 'foo' + end + # delete will call the search again and should not return the deleted user + foo_returns = ['"ID","Name","Project Id","Domain","Description","Email","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo",' + project + ',"foo_domain_id","foo description","foo@example.com",True +"2cb05cfed7c24279be884ba4f6520262","foo",' + project + ',"bar_domain_id","foo description","foo@example.com",True +"3cb05cfed7c24279be884ba4f6520262","foo",' + project + ',"another_domain_id","foo description","foo@example.com",True +'] + nn = 1 + if delete + nn = 2 + foo_returns << '' + end + if missing + foo_returns = [''] + end + provider.class.expects(:openstack).times(nn) + .with('user', 'list', '--quiet', '--format', 'csv', ['--long']) + .returns(*foo_returns) + end + + before :each, :default => true do + before_hook(false, false, false, false, false) + end + before :each, :delete => true do + before_hook(true, false, false, false, false) + end + before :each, :missing => true do + before_hook(false, true, false, false, false) + end + before :each, :noproject => true do + before_hook(false, false, true, false, false) + end + before :each, :default_https => true do + before_hook(false, false, false, false, false) + end + before :each, :user_cached => true do + before_hook(false, false, false, true, false) + end + before :each, :nohooks => true do + # do nothing + end + before :each, :project_only => true do + before_hook(false, false, false, false, true) + end + before :each, :noproject_user_cached => true do + before_hook(false, false, true, true, false) + end + describe 'when managing a user' do it_behaves_like 'authenticated with environment variables' do - describe '#create' do + describe '#create', :project_only => true do it 'creates a user' do - provider.class.stubs(:openstack) - .with('user', 'list', '--quiet', '--format', 'csv', '--long') - 
.returns('"ID","Name","Project","Email","Enabled" -"1cb05cfed7c24279be884ba4f6520262","foo","foo","foo@example.com",True + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","foo","bar_domain_id","foo",True +') + provider.class.expects(:openstack) + .with('role', 'show', '--format', 'shell', '_member_') + .returns(' +name="_member_" ') - provider.class.stubs(:openstack) - .with('user', 'create', '--format', 'shell', ['foo', '--enable', '--password', 'foo', '--project', 'foo', '--email', 'foo@example.com']) + provider.class.expects(:openstack) + .with('role', 'add', ['_member_', '--project', '2cb05cfed7c24279be884ba4f6520262', '--user', '12b23f07d4a3448d8189521ab09610b0']) + provider.class.expects(:openstack) + .with('user', 'create', '--format', 'shell', ['foo', '--enable', '--password', 'foo', '--email', 'foo@example.com', '--domain', 'foo_domain']) .returns('email="foo@example.com" enabled="True" id="12b23f07d4a3448d8189521ab09610b0" name="foo" -project_id="5e2001b2248540f191ff22627dc0c2d7" username="foo" ') provider.create @@ -57,11 +158,9 @@ describe '#destroy' do it 'destroys a user' do - provider.class.stubs(:openstack) - .with('user', 'list', '--quiet', '--format', 'csv', '--long') - .returns('"ID","Name","Project","Email","Enabled"') - provider.class.stubs(:openstack) - .with('user', 'delete', []) + provider.instance_variable_get('@property_hash')[:id] = 'my-user-id' + provider.class.expects(:openstack) + .with('user', 'delete', 'my-user-id') provider.destroy expect(provider.exists?).to be_falsey end @@ -71,9 +170,6 @@ describe '#exists' do context 'when user does not exist' do subject(:response) do - provider.class.stubs(:openstack) - .with('user', 'list', '--quiet', '--format', 'csv', '--long') - .returns('"ID","Name","Project","Email","Enabled"') response = provider.exists? 
end @@ -81,129 +177,128 @@ end end - describe '#instances' do + describe '#instances', :noproject => true do it 'finds every user' do - provider.class.stubs(:openstack) - .with('user', 'list', '--quiet', '--format', 'csv', '--long') - .returns('"ID","Name","Project","Email","Enabled" -"1cb05cfed7c24279be884ba4f6520262","foo","foo","foo@example.com",True -') - instances = Puppet::Type::Keystone_user::ProviderOpenstack.instances - expect(instances.count).to eq(1) + instances = provider.class.instances + expect(instances.count).to eq(3) + expect(instances[0].name).to eq('foo') + expect(instances[0].domain).to eq('another_domain') + expect(instances[1].name).to eq('foo::foo_domain') + expect(instances[2].name).to eq('foo::bar_domain') end end describe '#tenant' do - it 'gets the tenant with default backend' do - provider.class.stubs(:openstack) - .with('user', 'list', '--quiet', '--format', 'csv', '--long') - .returns('"ID","Name","Project","Email","Enabled" -"1cb05cfed7c24279be884ba4f6520262","foo","foo","foo@example.com",True + it 'gets the tenant with default backend', :user_cached => true do + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True ') - provider.class.stubs(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'foo']) - .returns('"ID","Name","Project","User" -"9fe2ff9ee4384b1894a90878d3e92bab","_member_","foo","foo" + provider.class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', ['--user', '1cb05cfed7c24279be884ba4f6520262', '--long']) + .returns('"ID","Name","Domain ID","Description","Enabled" +"foo_project_id1","foo","foo_domain_id","",True ') + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' tenant = provider.tenant expect(tenant).to eq('foo') end - it 'gets the tenant with LDAP backend' do - provider.class.stubs(:openstack) - .with('user', 'list', '--quiet', '--format', 'csv', '--long') - .returns('"ID","Name","Project","Email","Enabled" -"1cb05cfed7c24279be884ba4f6520262","foo","","foo@example.com",True + it 'gets the tenant with LDAP backend', :user_cached => true do + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True ') provider.class.expects(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'foo']) - .returns('"ID","Name","Project","User" -"1cb05cfed7c24279be884ba4f6520262","foo","foo","foo" + .with('project', 'list', '--quiet', '--format', 'csv', ['--user', '1cb05cfed7c24279be884ba4f6520262', '--long']) + .returns('"ID","Name","Domain ID","Description","Enabled" +"foo_project_id1","foo","foo_domain_id","",True +"bar_project_id2","bar","bar_domain_id","",True +"foo_project_id2","foo","another_domain_id","",True ') tenant = provider.tenant expect(tenant).to eq('foo') end end - - describe '#tenant=' do + describe '#tenant=', :project_only => true do context 'when using default backend' do it 'sets the tenant' do + 
provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + provider.instance_variable_get('@property_hash')[:domain] = 'foo_domain' + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True +') provider.class.expects(:openstack) - .with('user', 'set', ['foo', '--project', 'bar']) + .with('role', 'show', '--format', 'shell', '_member_') + .returns('name="_member_"') provider.class.expects(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'bar']) - .returns('"ID","Name","Project","User" -"9fe2ff9ee4384b1894a90878d3e92bab","_member_","bar","foo" -') + .with('role', 'add', ['_member_', '--project', '2cb05cfed7c24279be884ba4f6520262', '--user', '1cb05cfed7c24279be884ba4f6520262']) provider.tenant=('bar') end end - context 'when using LDAP read-write backend' do it 'sets the tenant when _member_ role exists' do - provider.class.expects(:openstack) - .with('user', 'set', ['foo', '--project', 'bar']) - provider.class.expects(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'bar']) - .returns('') - provider.class.expects(:openstack) - .with('role', 'show', '--format', 'shell', ['_member_']) - .returns('id="9fe2ff9ee4384b1894a90878d3e92bab" -name="_member_" + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + provider.instance_variable_get('@property_hash')[:domain] = 'foo_domain' + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True ') provider.class.expects(:openstack) - .with('role', 'add', ['_member_', '--project', 'bar', '--user', 'foo']) + .with('role', 'show', '--format', 'shell', '_member_') + .returns('name="_member_"') + provider.class.expects(:openstack) + .with('role', 'add', ['_member_', '--project', '2cb05cfed7c24279be884ba4f6520262', '--user', '1cb05cfed7c24279be884ba4f6520262']) provider.tenant=('bar') end it 'sets the tenant when _member_ role does not exist' do + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + provider.instance_variable_get('@property_hash')[:domain] = 'foo_domain' + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True +') provider.class.expects(:openstack) - .with('user', 'set', ['foo', '--project', 'bar']) - provider.class.expects(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'bar']) - .returns('') - provider.class.expects(:openstack) - .with('role', 'show', '--format', 'shell', ['_member_']) + .with('role', 'show', '--format', 'shell', '_member_') .raises(Puppet::ExecutionFailure, 'no such role _member_') provider.class.expects(:openstack) - .with('role', 'create', '--format', 'shell', ['_member_']) + .with('role', 'create', '--format', 'shell', '_member_') .returns('name="_member_"') 
provider.class.expects(:openstack) - .with('role', 'add', ['_member_', '--project', 'bar', '--user', 'foo']) - .returns('id="8wr2ff9ee4384b1894a90878d3e92bab" -name="_member_" -') + .with('role', 'add', ['_member_', '--project', '2cb05cfed7c24279be884ba4f6520262', '--user', '1cb05cfed7c24279be884ba4f6520262']) provider.tenant=('bar') end end - -# This doesn't make sense, need to clarify what's happening with LDAP mock -=begin - context 'when using LDAP read-only backend' do + context 'when using LDAP read-only backend', :nohooks => true do it 'sets the tenant when _member_ role exists' do - provider.class.expects(:openstack) - .with('user', 'set', [['foo', '--project', 'bar']]) - .raises(Puppet::ExecutionFailure, 'You are not authorized to perform the requested action: LDAP user update') - provider.class.expects(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', [['foo', '--project', 'bar']]) - .returns('') - provider.class.expects(:openstack) - .with('role', 'show', '--format', 'shell', [['_member_']]) - .returns('id="9fe2ff9ee4384b1894a90878d3e92bab" -name="_member_" + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + provider.instance_variable_get('@property_hash')[:domain] = 'foo_domain' + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True ') provider.class.expects(:openstack) - .with('role', 'add', [['_member_', '--project', 'bar', '--user', 'foo']]) - provider.tenant=('bar') - end - - it 'sets the tenant and gets an unexpected exception message' do + .with('role', 'show', '--format', 'shell', '_member_') + .returns('name="_member_"') provider.class.expects(:openstack) - .with('user', 'set', [['foo', '--project', 'bar']]) - .raises(Puppet::ExecutionFailure, 'unknown error message') - expect{ provider.tenant=('bar') }.to raise_error(Puppet::ExecutionFailure, /unknown error message/) + .with('role', 'add', ['_member_', '--project', '2cb05cfed7c24279be884ba4f6520262', '--user', '1cb05cfed7c24279be884ba4f6520262']) + provider.tenant=('bar') end end -=end end end end @@ -217,6 +312,7 @@ :password => 'foo', :tenant => 'foo', :email => 'foo@example.com', + :domain => 'foo_domain', } end @@ -229,12 +325,25 @@ end shared_examples 'with auth-url environment variable' do - ENV['OS_AUTH_URL'] = 'http://localhost:5000' + ENV['OS_AUTH_URL'] = 'http://127.0.0.1:5000' end it_behaves_like 'with auth-url environment variable' do - it 'checks the password' do - Puppet::Provider::Openstack.stubs(:openstack) + it 'checks the password', :noproject_user_cached => true do + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + mock_creds = Puppet::Provider::Openstack::CredentialsV3.new + mock_creds.auth_url='http://127.0.0.1:5000' + mock_creds.password='foo' + mock_creds.username='foo' + mock_creds.user_id='1cb05cfed7c24279be884ba4f6520262' + mock_creds.project_id='project-id-1' + Puppet::Provider::Openstack::CredentialsV3.expects(:new).returns(mock_creds) + Puppet::Provider::Openstack.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', ['--user', '1cb05cfed7c24279be884ba4f6520262', '--long']) + .returns('"ID","Name","Domain ID","Description","Enabled" +"project-id-1","foo","foo_domain_id","foo",True +') + 
Puppet::Provider::Openstack.expects(:openstack) .with('token', 'issue', ['--format', 'value']) .returns('2015-05-14T04:06:05Z e664a386befa4a30878dcef20e79f167 @@ -245,14 +354,48 @@ expect(password).to eq('foo') end - it 'fails the password check' do - Puppet::Provider::Openstack.stubs(:openstack) + it 'fails the password check', :noproject_user_cached => true do + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + Puppet::Provider::Openstack.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', ['--user', '1cb05cfed7c24279be884ba4f6520262', '--long']) + .returns('"ID","Name","Domain ID","Description","Enabled" +"project-id-1","foo","foo_domain_id","foo",True +') + Puppet::Provider::Openstack.expects(:openstack) .with('token', 'issue', ['--format', 'value']) .raises(Puppet::ExecutionFailure, 'HTTP 401 invalid authentication') password = provider.password expect(password).to eq(nil) end + + it 'checks the password with domain scoped token', :nohooks => true do + provider.instance_variable_get('@property_hash')[:id] = '1cb05cfed7c24279be884ba4f6520262' + provider.instance_variable_get('@property_hash')[:domain] = 'foo_domain' + mock_creds = Puppet::Provider::Openstack::CredentialsV3.new + mock_creds.auth_url='http://127.0.0.1:5000' + mock_creds.password='foo' + mock_creds.username='foo' + mock_creds.user_id='1cb05cfed7c24279be884ba4f6520262' + mock_creds.domain_name='foo_domain' + Puppet::Provider::Openstack::CredentialsV3.expects(:new).returns(mock_creds) + Puppet::Provider::Openstack.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', ['--user', '1cb05cfed7c24279be884ba4f6520262', '--long']) + .returns('"ID","Name","Domain ID","Description","Enabled" +') + Puppet::Provider::Openstack.expects(:openstack) + .with('token', 'issue', ['--format', 'value']) + .returns('2015-05-14T04:06:05Z +e664a386befa4a30878dcef20e79f167 +8dce2ae9ecd34c199d2877bf319a3d06 +ac43ec53d5a74a0b9f51523ae41a29f0 +') + password = provider.password + expect(password).to eq('foo') + end end + end + + describe 'when updating a user with unmanaged password', :nohooks => true do describe 'when updating a user with unmanaged password' do @@ -265,13 +408,239 @@ :replace_password => 'False', :tenant => 'foo', :email => 'foo@example.com', + :domain => 'foo_domain', } end + let(:resource) do + Puppet::Type::Keystone_user.new(user_attrs) + end + + let :provider do + provider_class.new(resource) + end + it 'should not try to check password' do expect(provider.password).to eq('foo') end end + end + + it_behaves_like 'authenticated with environment variables' do + describe 'v3 domains with no domain in resource', :user_cached => true do + let(:user_attrs) do + { + :name => 'foo', + :ensure => 'present', + :enabled => 'True', + :password => 'foo', + :tenant => 'foo', + :email => 'foo@example.com', + } + end + + it 'adds default domain to commands' do + mock = { + 'identity' => {'default_domain_id' => 'foo_domain_id'} + } + Puppet::Util::IniConfig::File.expects(:new).returns(mock) + File.expects(:exists?).with('/etc/keystone/keystone.conf').returns(true) + mock.expects(:read).with('/etc/keystone/keystone.conf') + provider.class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', ['--user', '1cb05cfed7c24279be884ba4f6520262', '--long']) + .returns('"ID","Name" +') + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" 
+"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True +') + provider.class.expects(:openstack) + .with('role', 'show', '--format', 'shell', '_member_') + .returns(' +name="_member_" +') + provider.class.expects(:openstack) + .with('role', 'add', ['_member_', '--project', '1cb05cfed7c24279be884ba4f6520262', '--user', '1cb05cfed7c24279be884ba4f6520262']) + provider.class.expects(:openstack) + .with('user', 'create', '--format', 'shell', ['foo', '--enable', '--password', 'foo', '--email', 'foo@example.com', '--domain', 'foo_domain']) + .returns('email="foo@example.com" +enabled="True" +id="1cb05cfed7c24279be884ba4f6520262" +name="foo" +username="foo" +') + provider.create + expect(provider.exists?).to be_truthy + expect(provider.id).to eq("1cb05cfed7c24279be884ba4f6520262") + end + end + + describe 'v3 domains with domain in resource', :project_only => true do + let(:user_attrs) do + { + :name => 'foo', + :ensure => 'present', + :enabled => 'True', + :password => 'foo', + :tenant => 'foo', + :email => 'foo@example.com', + :domain => 'bar_domain', + } + end + + it 'uses given domain in commands' do + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True +') + provider.class.expects(:openstack) + .with('role', 'show', '--format', 'shell', '_member_') + .returns(' +name="_member_" +') + provider.class.expects(:openstack) + .with('role', 'add', ['_member_', '--project', '1cb05cfed7c24279be884ba4f6520262', '--user', '2cb05cfed7c24279be884ba4f6520262']) + provider.class.expects(:openstack) + .with('user', 'create', '--format', 'shell', ['foo', '--enable', '--password', 'foo', '--email', 'foo@example.com', '--domain', 'bar_domain']) + .returns('email="foo@example.com" +enabled="True" +id="2cb05cfed7c24279be884ba4f6520262" +name="foo" +username="foo" +') + provider.create + expect(provider.exists?).to be_truthy + expect(provider.id).to eq("2cb05cfed7c24279be884ba4f6520262") + end + end + + describe 'v3 domains with domain in name/title', :project_only => true do + let(:user_attrs) do + { + :name => 'foo::bar_domain', + :ensure => 'present', + :enabled => 'True', + :password => 'foo', + :tenant => 'foo', + :email => 'foo@example.com', + } + end + + it 'uses given domain in commands' do + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True +') + provider.class.expects(:openstack) + .with('role', 'show', '--format', 'shell', '_member_') + .returns(' +name="_member_" +') + provider.class.expects(:openstack) + .with('role', 'add', ['_member_', '--project', '1cb05cfed7c24279be884ba4f6520262', '--user', '2cb05cfed7c24279be884ba4f6520262']) + provider.class.expects(:openstack) + .with('user', 'create', '--format', 'shell', ['foo', '--enable', '--password', 'foo', '--email', 'foo@example.com', '--domain', 'bar_domain']) + .returns('email="foo@example.com" +enabled="True" +id="2cb05cfed7c24279be884ba4f6520262" +name="foo" +username="foo" +') + provider.create + expect(provider.exists?).to be_truthy + expect(provider.id).to 
eq("2cb05cfed7c24279be884ba4f6520262") + expect(provider.name).to eq('foo::bar_domain') + end + end + describe 'v3 domains with domain in name/title and in resource', :project_only => true do + let(:user_attrs) do + { + :name => 'foo::bar_domain', + :ensure => 'present', + :enabled => 'True', + :password => 'foo', + :tenant => 'foo', + :email => 'foo@example.com', + :domain => 'foo_domain', + } + end + + it 'uses the resource domain in commands' do + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","bar","bar_domain_id","bar",True +') + provider.class.expects(:openstack) + .with('role', 'show', '--format', 'shell', '_member_') + .returns(' +name="_member_" +') + provider.class.expects(:openstack) + .with('role', 'add', ['_member_', '--project', '1cb05cfed7c24279be884ba4f6520262', '--user', '2cb05cfed7c24279be884ba4f6520262']) + provider.class.expects(:openstack) + .with('user', 'create', '--format', 'shell', ['foo', '--enable', '--password', 'foo', '--email', 'foo@example.com', '--domain', 'foo_domain']) + .returns('email="foo@example.com" +enabled="True" +id="2cb05cfed7c24279be884ba4f6520262" +name="foo" +username="foo" +') + provider.create + expect(provider.exists?).to be_truthy + expect(provider.id).to eq("2cb05cfed7c24279be884ba4f6520262") + expect(provider.name).to eq('foo::bar_domain') + end + end + + describe 'v3 domains with domain in name/title and in resource and in tenant', :project_only => true do + let(:user_attrs) do + { + :name => 'foo::bar_domain', + :ensure => 'present', + :enabled => 'True', + :password => 'foo', + :tenant => 'foo::foo_domain', + :email => 'foo@example.com', + :domain => 'foo_domain', + } + end + + it 'uses the resource domain in commands' do + project_class.expects(:openstack) + .with('project', 'list', '--quiet', '--format', 'csv', '--long') + .returns('"ID","Name","Domain ID","Description","Enabled" +"1cb05cfed7c24279be884ba4f6520262","foo","foo_domain_id","foo",True +"2cb05cfed7c24279be884ba4f6520262","foo","bar_domain_id","foo",True +') + provider.class.expects(:openstack) + .with('role', 'show', '--format', 'shell', '_member_') + .returns(' +name="_member_" +') + provider.class.expects(:openstack) + .with('role', 'add', ['_member_', '--project', '1cb05cfed7c24279be884ba4f6520262', '--user', '2cb05cfed7c24279be884ba4f6520262']) + provider.class.expects(:openstack) + .with('user', 'create', '--format', 'shell', ['foo', '--enable', '--password', 'foo', '--email', 'foo@example.com', '--domain', 'foo_domain']) + .returns('email="foo@example.com" +enabled="True" +id="2cb05cfed7c24279be884ba4f6520262" +name="foo" +username="foo" +') + provider.create + expect(provider.exists?).to be_truthy + expect(provider.id).to eq("2cb05cfed7c24279be884ba4f6520262") + expect(provider.name).to eq('foo::bar_domain') + end + end end end diff --git a/keystone/spec/unit/provider/keystone_user_role/openstack_spec.rb b/keystone/spec/unit/provider/keystone_user_role/openstack_spec.rb index 2490adc52..51c147535 100644 --- a/keystone/spec/unit/provider/keystone_user_role/openstack_spec.rb +++ b/keystone/spec/unit/provider/keystone_user_role/openstack_spec.rb @@ -2,10 +2,190 @@ require 'spec_helper' require 'puppet/provider/keystone_user_role/openstack' +setup_provider_tests + provider_class = Puppet::Type.type(:keystone_user_role).provider(:openstack) +def 
user_class + Puppet::Type.type(:keystone_user).provider(:openstack) +end +def project_class + Puppet::Type.type(:keystone_tenant).provider(:openstack) +end describe provider_class do + after :each do + provider_class.reset + end + + # assumes Enabled is the last column - no quotes + def list_to_csv(thelist) + if thelist.is_a?(String) + return '' + end + str="" + thelist.each do |rec| + if rec.is_a?(String) + return '' + end + rec.each do |xx| + if xx.equal?(rec.last) + # True/False have no quotes + if xx == 'True' or xx == 'False' + str = str + xx + "\n" + else + str = str + '"' + xx + '"' + "\n" + end + else + str = str + '"' + xx + '",' + end + end + end + str + end + + def before_need_instances + provider.class.expects(:openstack).once + .with('domain', 'list', '--quiet', '--format', 'csv') + .returns('"ID","Name","Enabled","Description" +"foo_domain_id","foo_domain",True,"foo domain" +"bar_domain_id","bar_domain",True,"bar domain" +"another_domain_id","another_domain",True,"another domain" +"disabled_domain_id","disabled_domain",False,"disabled domain" +') + project_list = [['project-id-1','foo','foo_domain_id','foo project in foo domain','True'], + ['project-id-2','foo','bar_domain_id','foo project in bar domain','True'], + ['project-id-3','bar','foo_domain_id','bar project in foo domain','True'], + ['project-id-4','etc','another_domain_id','another project','True']] + + user_list_for_project = { + 'project-id-1' => [['user-id-1','foo@example.com','foo','foo_domain','foo user','foo@foo_domain','True'], + ['user-id-2','bar@example.com','foo','foo_domain','bar user','bar@foo_domain','True']], + 'project-id-2' => [['user-id-3','foo@bar.com','foo','bar_domain','foo user','foo@bar_domain','True'], + ['user-id-4','bar@bar.com','foo','bar_domain','bar user','bar@bar_domain','True']] + } + user_list_for_project.default = '' + + user_list_for_domain = { + 'foo_domain_id' => [['user-id-1','foo@example.com','foo','foo_domain','foo user','foo@foo_domain','True'], + ['user-id-2','bar@example.com','foo','foo_domain','bar user','bar@foo_domain','True']], + 'bar_domain_id' => [['user-id-3','foo@bar.com','foo','bar_domain','foo user','foo@bar_domain','True'], + ['user-id-4','bar@bar.com','foo','bar_domain','bar user','bar@bar_domain','True']] + } + user_list_for_domain.default = '' + + role_list_for_project_user = { + 'project-id-1' => { + 'user-id-1' => [['role-id-1','foo','foo','foo'], + ['role-id-2','bar','foo','foo']] + }, + 'project-id-2' => { + 'user-id-3' => [['role-id-1','foo','foo','foo'], + ['role-id-2','bar','foo','foo']] + } + } + role_list_for_project_user.default = '' + + role_list_for_domain_user = { + 'foo_domain_id' => { + 'user-id-2' => [['role-id-1','foo','foo_domain','foo'], + ['role-id-2','bar','foo_domain','foo']] + }, + 'bar_domain_id' => { + 'user-id-4' => [['role-id-1','foo','bar_domain','foo'], + ['role-id-2','bar','bar_domain','foo']] + } + } + role_list_for_project_user.default = '' + + provider.class.expects(:openstack).once + .with('project', 'list', '--quiet', '--format', 'csv', ['--long']) + .returns('"ID","Name","Domain ID","Description","Enabled"' + "\n" + list_to_csv(project_list)) + project_list.each do |rec| + csvlist = list_to_csv(user_list_for_project[rec[0]]) + provider.class.expects(:openstack) + .with('user', 'list', '--quiet', '--format', 'csv', ['--long', '--project', rec[0]]) + .returns('"ID","Name","Project","Domain","Description","Email","Enabled"' + "\n" + csvlist) + next if csvlist == '' + user_list_for_project[rec[0]].each do |urec| + csvlist = '' + if 
role_list_for_project_user.has_key?(rec[0]) and + role_list_for_project_user[rec[0]].has_key?(urec[0]) + csvlist = list_to_csv(role_list_for_project_user[rec[0]][urec[0]]) + end + provider.class.expects(:openstack) + .with('role', 'list', '--quiet', '--format', 'csv', ['--project', rec[0], '--user', urec[0]]) + .returns('"ID","Name","Project","User"' + "\n" + csvlist) + end + end + ['foo_domain_id', 'bar_domain_id'].each do |domid| + csvlist = list_to_csv(user_list_for_domain[domid]) + provider.class.expects(:openstack) + .with('user', 'list', '--quiet', '--format', 'csv', ['--long', '--domain', domid]) + .returns('"ID","Name","Project","Domain","Description","Email","Enabled"' + "\n" + csvlist) + next if csvlist == '' + user_list_for_domain[domid].each do |urec| + csvlist = '' + if role_list_for_domain_user.has_key?(domid) and + role_list_for_domain_user[domid].has_key?(urec[0]) + csvlist = list_to_csv(role_list_for_domain_user[domid][urec[0]]) + end + provider.class.expects(:openstack) + .with('role', 'list', '--quiet', '--format', 'csv', ['--domain', domid, '--user', urec[0]]) + .returns('"ID","Name","Domain","User"' + "\n" + csvlist) + end + end + end + + def before_common(destroy, nolist=false, instances=false) + rolelistprojectuser = [['role-id-1','foo','foo','foo'], + ['role-id-2','bar','foo','foo']] + csvlist = list_to_csv(rolelistprojectuser) + rolelistreturns = ['"ID","Name","Project","User"' + "\n" + csvlist] + nn = 1 + if destroy + rolelistreturns = [''] + nn = 1 + end + unless nolist + provider.class.expects(:openstack).times(nn) + .with('role', 'list', '--quiet', '--format', 'csv', ['--project', 'project-id-1', '--user', 'user-id-1']) + .returns(*rolelistreturns) + end + + userhash = {:id => 'user-id-1', :name => 'foo@example.com'} + usermock = user_class.new(userhash) + unless instances + usermock.expects(:exists?).with(any_parameters).returns(true) + user_class.expects(:new).twice.with(any_parameters).returns(usermock) + end + user_class.expects(:instances).with(any_parameters).returns([usermock]) + + projecthash = {:id => 'project-id-1', :name => 'foo'} + projectmock = project_class.new(projecthash) + unless instances + projectmock.expects(:exists?).with(any_parameters).returns(true) + project_class.expects(:new).with(any_parameters).returns(projectmock) + end + project_class.expects(:instances).with(any_parameters).returns([projectmock]) + end + + before :each, :default => true do + before_common(false) + end + + before :each, :destroy => true do + before_common(true) + end + + before :each, :nolist => true do + before_common(true, true) + end + + before :each, :instances => true do + before_common(true, true, true) + end + shared_examples 'authenticated with environment variables' do ENV['OS_USERNAME'] = 'test' ENV['OS_PASSWORD'] = 'abc123' @@ -31,59 +211,83 @@ provider_class.new(resource) end - before(:each) do - provider.class.stubs(:openstack) - .with('user', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'foo']) - .returns('"ID","Name","Project","User" -"1cb05cfed7c24279be884ba4f6520262","foo","foo","foo" -') - end - - describe '#create' do + describe '#create', :default => true do it 'adds all the roles to the user' do - provider.class.stubs(:openstack) - .with('role', 'add', ['foo', '--project', 'foo', '--user', 'foo']) - provider.class.stubs(:openstack) - .with('role', 'add', ['bar', '--project', 'foo', '--user', 'foo']) - provider.class.stubs(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'foo']) - 
.returns('"ID","Name","Project","User" -"1cb05ed7c24279be884ba4f6520262","foo","foo","foo" -"2cb05ed7c24279be884ba4f6520262","bar","foo","foo" -') + provider.class.expects(:openstack) + .with('role', 'add', ['foo', '--project', 'project-id-1', '--user', 'user-id-1']) + provider.class.expects(:openstack) + .with('role', 'add', ['bar', '--project', 'project-id-1', '--user', 'user-id-1']) provider.create expect(provider.exists?).to be_truthy end end - describe '#destroy' do + describe '#destroy', :destroy => true do it 'removes all the roles from a user' do - provider.class.stubs(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'foo']) - .returns('"ID","Name","Project","User"') - provider.class.stubs(:openstack) - .with('role', 'remove', ['foo', '--project', 'foo', '--user', 'foo']) - provider.class.stubs(:openstack) - .with('role', 'remove', ['bar', '--project', 'foo', '--user', 'foo']) + provider.instance_variable_get('@property_hash')[:roles] = ['foo', 'bar'] + provider.class.expects(:openstack) + .with('role', 'remove', ['foo', '--project', 'project-id-1', '--user', 'user-id-1']) + provider.class.expects(:openstack) + .with('role', 'remove', ['bar', '--project', 'project-id-1', '--user', 'user-id-1']) provider.destroy expect(provider.exists?).to be_falsey end end - describe '#exists' do + describe '#exists', :default => true do subject(:response) do - provider.class.stubs(:openstack) - .with('user role', 'list', '--quiet', '--format', 'csv', ['foo', '--project', 'foo']) - .returns('"ID","Name","Project","User" -"1cb05ed7c24279be884ba4f6520262","foo","foo","foo" -') response = provider.exists? end it { is_expected.to be_truthy } end + + describe '#instances', :instances => true do + it 'finds every user role' do + provider.class.expects(:openstack) + .with('role', 'list', '--quiet', '--format', 'csv', []) + .returns('"ID","Name" +"foo-role-id","foo" +"bar-role-id","bar" +') + provider.class.expects(:openstack) + .with('role assignment', 'list', '--quiet', '--format', 'csv', []) + .returns(' +"Role","User","Group","Project","Domain" +"foo-role-id","user-id-1","","project-id-1","" +"bar-role-id","user-id-1","","project-id-1","" +') + instances = provider.class.instances + expect(instances.count).to eq(1) + expect(instances[0].name).to eq('foo@example.com@foo') + expect(instances[0].roles).to eq(['foo', 'bar']) + end + end + + describe '#roles=', :nolist => true do + let(:user_role_attrs) do + { + :name => 'foo@foo', + :ensure => 'present', + :roles => ['one', 'two'], + } + end + + it 'applies the new roles' do + provider.instance_variable_get('@property_hash')[:roles] = ['foo', 'bar'] + provider.class.expects(:openstack) + .with('role', 'remove', ['foo', '--project', 'project-id-1', '--user', 'user-id-1']) + provider.class.expects(:openstack) + .with('role', 'remove', ['bar', '--project', 'project-id-1', '--user', 'user-id-1']) + provider.class.expects(:openstack) + .with('role', 'add', ['one', '--project', 'project-id-1', '--user', 'user-id-1']) + provider.class.expects(:openstack) + .with('role', 'add', ['two', '--project', 'project-id-1', '--user', 'user-id-1']) + provider.roles=(['one', 'two']) + end + end end end end diff --git a/keystone/spec/unit/type/keystone_paste_ini_spec.rb b/keystone/spec/unit/type/keystone_paste_ini_spec.rb index 98f7157b6..2eae98a04 100644 --- a/keystone/spec/unit/type/keystone_paste_ini_spec.rb +++ b/keystone/spec/unit/type/keystone_paste_ini_spec.rb @@ -18,6 +18,6 @@ end it 'should accept a valid value' do 
@keystone_paste_ini[:value] = 'bar' - @keystone_paste_ini[:value].should == 'bar' + expect(@keystone_paste_ini[:value]).to eq('bar') end end diff --git a/keystone/spec/unit/type/keystone_user_spec.rb b/keystone/spec/unit/type/keystone_user_spec.rb new file mode 100644 index 000000000..789af435f --- /dev/null +++ b/keystone/spec/unit/type/keystone_user_spec.rb @@ -0,0 +1,25 @@ +require 'spec_helper' +require 'puppet' +require 'puppet/type/keystone_user' + +describe Puppet::Type.type(:keystone_user) do + + before :each do + @project = Puppet::Type.type(:keystone_user).new( + :name => 'foo', + :domain => 'foo-domain', + ) + + @domain = @project.parameter('domain') + end + + it 'should not be in sync for domain changes' do + expect { @domain.insync?('not-the-domain') }.to raise_error(Puppet::Error, /The domain cannot be changed from/) + expect { @domain.insync?(nil) }.to raise_error(Puppet::Error, /The domain cannot be changed from/) + end + + it 'should be in sync if domain is the same' do + expect(@domain.insync?('foo-domain')).to be true + end + +end diff --git a/kmod/.fixtures.yml b/kmod/.fixtures.yml new file mode 100644 index 000000000..f172a1973 --- /dev/null +++ b/kmod/.fixtures.yml @@ -0,0 +1,5 @@ +fixtures: + repositories: + "stdlib": "git://github.com/puppetlabs/puppetlabs-stdlib.git" + symlinks: + "kmod": "#{source_dir}" diff --git a/kmod/.gitignore b/kmod/.gitignore new file mode 100644 index 000000000..65839fa01 --- /dev/null +++ b/kmod/.gitignore @@ -0,0 +1,10 @@ +pkg/ +Gemfile.lock +vendor/ +spec/fixtures/ +.vagrant/ +.bundle/ +coverage/ +log/ +.*.swp +*~ diff --git a/kmod/.puppet-lint.rc b/kmod/.puppet-lint.rc new file mode 100644 index 000000000..d8f5c59ef --- /dev/null +++ b/kmod/.puppet-lint.rc @@ -0,0 +1,5 @@ +--fail-on-warnings +--relative +--no-80chars +--no-documentation +--no-class_inherits_from_params_class-check diff --git a/kmod/.sync.yml b/kmod/.sync.yml new file mode 100644 index 000000000..d62948585 --- /dev/null +++ b/kmod/.sync.yml @@ -0,0 +1,3 @@ +--- +.travis.yml: + forge_password: "QvaBWqv7ZaB34BzONHPxZy+1OW+eBP4dZmhDobAVFD20kOP4O4QHdCe9kS3y3JRXrNW3wuiuegNzoKb+JvqQrZPQF3Ea0DO4uCPVrnVbrnmGC0p7v2VDA1tsb2snnSMa5mC6+3wYMwS27zRSQ7vN3AwDKMdp/jcPbTEpnv/+314=" diff --git a/kmod/.travis.yml b/kmod/.travis.yml new file mode 100644 index 000000000..e5397c5b7 --- /dev/null +++ b/kmod/.travis.yml @@ -0,0 +1,32 @@ +--- +language: ruby +sudo: false +cache: bundler +bundler_args: --without system_tests +script: ["bundle exec rake validate", "bundle exec rake lint", "bundle exec rake spec SPEC_OPTS='--format documentation'", "bundle exec rake metadata"] +matrix: + fast_finish: true + include: + - rvm: 1.8.7 + env: PUPPET_GEM_VERSION="~> 3.0" FACTER_GEM_VERSION="~> 1.7.0" + - rvm: 1.9.3 + env: PUPPET_GEM_VERSION="~> 3.0" + - rvm: 2.0.0 + env: PUPPET_GEM_VERSION="~> 3.0" + - rvm: 2.0.0 + env: PUPPET_GEM_VERSION="~> 3.0" FUTURE_PARSER="yes" + - rvm: 2.1.6 + env: PUPPET_GEM_VERSION="~> 4.0" +notifications: + email: false +deploy: + provider: puppetforge + user: camptocamp + password: + secure: "QvaBWqv7ZaB34BzONHPxZy+1OW+eBP4dZmhDobAVFD20kOP4O4QHdCe9kS3y3JRXrNW3wuiuegNzoKb+JvqQrZPQF3Ea0DO4uCPVrnVbrnmGC0p7v2VDA1tsb2snnSMa5mC6+3wYMwS27zRSQ7vN3AwDKMdp/jcPbTEpnv/+314=" + on: + tags: true + # all_branches is required to use tags + all_branches: true + # Only publish if our main Ruby target builds + rvm: 1.9.3 diff --git a/kmod/CHANGELOG.md b/kmod/CHANGELOG.md new file mode 100644 index 000000000..54daf6d69 --- /dev/null +++ b/kmod/CHANGELOG.md @@ -0,0 +1,81 @@ +## 2015-08-27 - 
Release 2.1.0 + +Add minimal SuSE support + +## 2015-08-21 - Release 2.0.11 + +Use docker for acceptance tests + +## 2015-06-26 - Release 2.0.10 + +Fix strict_variables activation with rspec-puppet 2.2 + +## 2015-05-28 - Release 2.0.9 + +Add beaker_spec_helper to Gemfile + +## 2015-05-26 - Release 2.0.8 + +Use random application order in nodeset + +## 2015-05-26 - Release 2.0.7 + +add utopic & vivid nodesets + +## 2015-05-25 - Release 2.0.6 + +Don't allow failure on Puppet 4 + +## 2015-05-13 - Release 2.0.5 + +Add puppet-lint-file_source_rights-check gem + +## 2015-05-12 - Release 2.0.4 + +Don't pin beaker + +## 2015-04-27 - Release 2.0.3 + +Add nodeset ubuntu-12.04-x86_64-openstack + +## 2015-04-18 - Release 2.0.2 + +- Add beaker nodeset + +## 2015-04-15 - Release 2.0.1 + +- Fix kmod::install's file class parameter's default + +## 2015-04-03 - Release 2.0.0 + +- Add kmod::option and refactored everything to use kmod::setting +- removed obsolete generic.pp + +## 2015-03-24 - Release 1.0.6 + +- Lint + +## 2015-01-19 - Release 1.0.5 + +- Fix relative class inclusions + +## 2015-01-07 - Release 1.0.4 + +- Fix unquoted strings in cases + +## 2014-12-16 - Release 1.0.1 + +- Fix for future parser + +## 2014-10-20 - Release 1.0.0 + +- Setup automatic Forge releases + +## 2014-07-02 - Release 0.1.1 + +- Fix deprecation warnings, #22 + +## 2014-07-02 - Release 0.1.0 + +- Add unit tests +- Various improvements diff --git a/kmod/Gemfile b/kmod/Gemfile new file mode 100644 index 000000000..0cb59337f --- /dev/null +++ b/kmod/Gemfile @@ -0,0 +1,47 @@ +source ENV['GEM_SOURCE'] || "https://rubygems.org" + +group :development, :unit_tests do + gem 'rake', :require => false + gem 'rspec', '< 3.2', :require => false if RUBY_VERSION =~ /^1.8/ + gem 'rspec-puppet', :require => false + gem 'puppetlabs_spec_helper', :require => false + gem 'metadata-json-lint', :require => false + gem 'puppet-lint', :require => false + gem 'puppet-lint-unquoted_string-check', :require => false + gem 'puppet-lint-empty_string-check', :require => false + gem 'puppet-lint-spaceship_operator_without_tag-check', :require => false + gem 'puppet-lint-variable_contains_upcase', :require => false + gem 'puppet-lint-absolute_classname-check', :require => false + gem 'puppet-lint-undef_in_function-check', :require => false + gem 'puppet-lint-leading_zero-check', :require => false + gem 'puppet-lint-trailing_comma-check', :require => false + gem 'puppet-lint-file_ensure-check', :require => false + gem 'puppet-lint-version_comparison-check', :require => false + gem 'puppet-lint-fileserver-check', :require => false + gem 'puppet-lint-file_source_rights-check', :require => false + gem 'puppet-lint-alias-check', :require => false + gem 'rspec-puppet-facts', :require => false + gem 'github_changelog_generator', :require => false, :git => 'https://github.com/raphink/github-changelog-generator.git', :branch => 'dev/all_patches' if RUBY_VERSION !~ /^1.8/ + gem 'puppet-blacksmith', :require => false if RUBY_VERSION !~ /^1.8/ +end + +group :system_tests do + gem 'beaker', :require => false + gem 'beaker-rspec', :require => false + gem 'beaker_spec_helper', :require => false + gem 'serverspec', :require => false +end + +if facterversion = ENV['FACTER_GEM_VERSION'] + gem 'facter', facterversion, :require => false +else + gem 'facter', :require => false +end + +if puppetversion = ENV['PUPPET_GEM_VERSION'] + gem 'puppet', puppetversion, :require => false +else + gem 'puppet', :require => false +end + +# vim:ft=ruby diff --git a/kmod/LICENSE 
b/kmod/LICENSE new file mode 100644 index 000000000..8d968b6cb --- /dev/null +++ b/kmod/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/kmod/README.md b/kmod/README.md new file mode 100644 index 000000000..5b378dbae --- /dev/null +++ b/kmod/README.md @@ -0,0 +1,116 @@ +# Kmod Puppet module + +[![Puppet Forge Version](http://img.shields.io/puppetforge/v/camptocamp/kmod.svg)](https://forge.puppetlabs.com/camptocamp/kmod) +[![Puppet Forge Downloads](http://img.shields.io/puppetforge/dt/camptocamp/kmod.svg)](https://forge.puppetlabs.com/camptocamp/kmod) +[![Build Status](https://img.shields.io/travis/camptocamp/puppet-kmod/master.svg)](https://travis-ci.org/camptocamp/puppet-kmod) +[![Puppet Forge Endorsement](https://img.shields.io/puppetforge/e/camptocamp/kmod.svg)](https://forge.puppetlabs.com/camptocamp/kmod) +[![Gemnasium](https://img.shields.io/gemnasium/camptocamp/puppet-kmod.svg)](https://gemnasium.com/camptocamp/puppet-kmod) +[![By Camptocamp](https://img.shields.io/badge/by-camptocamp-fb7047.svg)](http://www.camptocamp.com) + +## Description + +This module provides definitions to manipulate modprobe.conf (5) stanzas: + + * kmod::alias + * kmod::install + * kmod::blacklist + +It depends on Augeas with the modprobe lens. + +## Usage + +This module has five main defined types: + + * kmod::load + * kmod::alias + * kmod::option + * kmod::install + * kmod::blacklist + + +### kmod::load + +Loads a module using modprobe and manages persistent modules in /etc/sysconfig/modules. + +```puppet + kmod::load { 'mymodule': } +``` + +### kmod::alias + +Adds an alias to modprobe.conf; by default `/etc/modprobe.d/<name>.conf` is assumed for a filename. + +```puppet + kmod::alias { 'bond0': + modulename => 'bonding', + } +``` + +Params: +* `modulename`: Name of the module to alias +* `aliasname`: Name of the alias (defaults to the resource title) +* `file`: File to write to (see above default) + +### kmod::option + +Adds an option to modprobe.conf + +```puppet + kmod::option { 'bond0 mode': + module => 'bond0', + option => 'mode', + value => '1', + } + + kmod::option { 'bond0': + option => 'mode', + value => '1', + } +``` + +Params: +* `option`: Name of the parameter to add +* `value`: Value of the parameter +* `module`: Name of the module (if omitted, the resource title is used) +* `file`: File to write to (defaults to `/etc/modprobe.d/<module>.conf`) + +### kmod::blacklist + +Manages modprobe blacklist entries. Blacklist entries prevent module aliases from being used, +but do not prevent the module from being loaded.
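As a rough sketch (inferred from the `kmod::setting` Augeas rules added later in this patch, not something the README spells out), a blacklist resource simply keeps a plain `blacklist` stanza in the target file:

```puppet
# Sketch only: this ends up maintaining the line "blacklist pcspkr"
# in /etc/modprobe.d/blacklist.conf through the Augeas Modprobe lens.
kmod::blacklist { 'pcspkr': }
```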
+To prevent a module from being loaded use `kmod::install`. + +```puppet + kmod::blacklist { 'foo': } +``` + +Params: +* `file`: File to write to, defaults to `/etc/modprobe.d/blacklist.conf` + +### kmod::install + +Manages modprobe install entries. + +```puppet + kmod::install { 'pcspkr': } +``` + +If you want to ensure that a module can't be loaded at all, you can do the following: +```puppet + kmod::install { 'dccp': command => '/bin/false' } +``` + +Params: +* `file`: File to write to (defaults to `/etc/modprobe.d/<name>.conf`) +* `command`: (optional) command associated with the install, defaults to `/bin/true` + + + +## Contributing + +Please report bugs and feature requests using the [GitHub issue +tracker](https://github.com/camptocamp/puppet-kmod/issues). + +For pull requests, please check your Puppet manifests +with [puppet-lint](https://github.com/camptocamp/puppet-kmod/issues) so that they follow the recommended Puppet style guidelines from the +[Puppet Labs style guide](http://docs.puppetlabs.com/guides/style_guide.html). diff --git a/kmod/Rakefile b/kmod/Rakefile new file mode 100644 index 000000000..adcac180c --- /dev/null +++ b/kmod/Rakefile @@ -0,0 +1,23 @@ +require 'puppetlabs_spec_helper/rake_tasks' +require 'puppet-lint/tasks/puppet-lint' + +Rake::Task[:lint].clear +PuppetLint::RakeTask.new :lint do |config| + config.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp", "vendor/**/*.pp"] + config.disable_checks = ['80chars'] + config.fail_on_warnings = true +end + +PuppetSyntax.exclude_paths = ["spec/fixtures/**/*.pp", "vendor/**/*"] + +# Publishing tasks +unless RUBY_VERSION =~ /^1\.8/ + require 'puppet_blacksmith' + require 'puppet_blacksmith/rake_tasks' + require 'github_changelog_generator/task' + GitHubChangelogGenerator::RakeTask.new :changelog do |config| + m = Blacksmith::Modulefile.new + config.future_release = m.version + config.release_url = "https://forge.puppetlabs.com/#{m.author}/#{m.name}/%s" + end +end diff --git a/kmod/manifests/alias.pp b/kmod/manifests/alias.pp new file mode 100644 index 000000000..dcdb974f7 --- /dev/null +++ b/kmod/manifests/alias.pp @@ -0,0 +1,26 @@ +# = Define: kmod::alias +# +# == Example +# +# kmod::alias { 'bond0': +# source => 'bonding', +# } +# +define kmod::alias( + $source, + $ensure = 'present', + $file = "/etc/modprobe.d/${name}.conf", + $aliasname = $name, +) { + + include ::kmod + + kmod::setting { "kmod::alias ${title}": + module => $aliasname, + file => $file, + category => 'alias', + option => 'modulename', + value => $source, + } + +} diff --git a/kmod/manifests/blacklist.pp b/kmod/manifests/blacklist.pp new file mode 100644 index 000000000..1dddddaa3 --- /dev/null +++ b/kmod/manifests/blacklist.pp @@ -0,0 +1,27 @@ +# +# == Definition: kmod::blacklist +# +# Set a kernel module as blacklisted. +# +# Parameters: +# - *ensure*: present/absent; +# - *file*: optionally, set the file where the stanza is written. +# +# Example usage: +# +# kmod::blacklist { 'pcspkr': } +# +define kmod::blacklist( + $ensure=present, + $file='/etc/modprobe.d/blacklist.conf', +) { + + + kmod::setting { "kmod::blacklist ${title}": + ensure => $ensure, + module => $name, + file => $file, + category => 'blacklist', + } + +} diff --git a/kmod/manifests/init.pp b/kmod/manifests/init.pp new file mode 100644 index 000000000..14da62ba0 --- /dev/null +++ b/kmod/manifests/init.pp @@ -0,0 +1,21 @@ +# +# == Class: kmod +# +# Ensures a couple of mandatory files are present before managing their +# content.
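For orientation, a minimal usage sketch of this class (it is also included automatically by `kmod::alias`, `kmod::option` and `kmod::setting`, so declaring it explicitly is optional):

```puppet
# Sketch only: the class ensures the base /etc/modprobe.d files exist;
# the alias below is then written to /etc/modprobe.d/bond0.conf by default.
include ::kmod

kmod::alias { 'bond0':
  source => 'bonding',
}
```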
+# +# +class kmod { + + if versioncmp($::augeasversion, '0.9.0') < 0 { + fail('Augeas 0.10.0 or higher required') + } + file { '/etc/modprobe.d': ensure => directory } + + file { [ + '/etc/modprobe.d/modprobe.conf', + '/etc/modprobe.d/aliases.conf', + '/etc/modprobe.d/blacklist.conf', + ]: ensure => file, + } +} diff --git a/kmod/manifests/install.pp b/kmod/manifests/install.pp new file mode 100644 index 000000000..8f6bb74c0 --- /dev/null +++ b/kmod/manifests/install.pp @@ -0,0 +1,30 @@ +# +# == Definition: kmod::install +# +# Set a kernel module as installed. +# +# Parameters: +# - *ensure*: present/absent; +# - *command*: optionally, set the command associated with the kernel module; +# - *file*: optionally, set the file where the stanza is written. +# +# Example usage: +# +# kmod::install { 'pcspkr': } +# +define kmod::install( + $ensure=present, + $command='/bin/true', + $file="/etc/modprobe.d/${name}.conf", +) { + + kmod::setting { "kmod::install ${title}": + ensure => $ensure, + module => $name, + file => $file, + category => 'install', + option => 'command', + value => $command, + } + +} diff --git a/kmod/manifests/load.pp b/kmod/manifests/load.pp new file mode 100644 index 000000000..07525681b --- /dev/null +++ b/kmod/manifests/load.pp @@ -0,0 +1,87 @@ +# +# == Definition: kmod::load +# +# Manage a kernel module in /etc/modules. +# +# Parameters: +# - *ensure*: present/absent; +# - *file*: optionally, set the file where the stanza is written. +# +# Example usage: +# +# kmod::load { 'sha256': } +# +define kmod::load( + $ensure=present, + $file='/etc/modules', +) { + + case $ensure { + 'present': { + case $::osfamily { + 'Debian': { + $changes = "clear '${name}'" + } + 'Suse': { + $changes = "set MODULES_LOADED_ON_BOOT/value[.='${name}'] '${name}'" + } + default: { } + } + + exec { "modprobe ${name}": + path => '/bin:/sbin:/usr/bin:/usr/sbin', + unless => "egrep -q '^${name} ' /proc/modules", + } + } + + 'absent': { + case $::osfamily { + 'Debian': { + $changes = "rm '${name}'" + } + 'Suse': { + $changes = "rm MODULES_LOADED_ON_BOOT/value[.='${name}']" + } + default: { } + } + + exec { "modprobe -r ${name}": + path => '/bin:/sbin:/usr/bin:/usr/sbin', + onlyif => "egrep -q '^${name} ' /proc/modules", + } + } + + default: { fail "${module_name}: unknown ensure value ${ensure}" } + } + + case $::osfamily { + 'Debian': { + augeas {"Manage ${name} in ${file}": + incl => $file, + lens => 'Modules.lns', + changes => $changes, + } + } + 'RedHat': { + file { "/etc/sysconfig/modules/${name}.modules": + ensure => $ensure, + mode => '0755', + content => template('kmod/redhat.modprobe.erb'), + } + } + 'Suse': { + $kernelfile = $file ? { + '/etc/modules' => '/etc/sysconfig/kernel', + default => $file, + } + augeas { "sysconfig_kernel_MODULES_LOADED_ON_BOOT_${name}": + lens => 'Shellvars_list.lns', + incl => $kernelfile, + changes => $changes, + } + } + default: { + fail "${module_name}: Unknown OS family ${::osfamily}" + } + } +} diff --git a/kmod/manifests/option.pp b/kmod/manifests/option.pp new file mode 100644 index 000000000..8597138f6 --- /dev/null +++ b/kmod/manifests/option.pp @@ -0,0 +1,35 @@ +# = Define: kmod::alias +# +# == Example +# +# kmod::option { 'bond0': +# option => 'bonding', +# } +# +define kmod::option( + $option, + $value, + $module = $name, + $ensure = 'present', + $file = undef, +) { + + include ::kmod + + $target_file = $file ? 
{ + undef => "/etc/modprobe.d/${module}.conf", + default => $file, + } + + + kmod::setting { "kmod::option ${title}": + ensure => $ensure, + module => $module, + category => 'options', + file => $target_file, + option => $option, + value => $value, + } + +} + diff --git a/kmod/manifests/setting.pp b/kmod/manifests/setting.pp new file mode 100644 index 000000000..b554d1646 --- /dev/null +++ b/kmod/manifests/setting.pp @@ -0,0 +1,45 @@ +# = Define: kmod::setting +# +# == Example +# +# +define kmod::setting( + $file, + $category, + $option = undef, + $value = undef, + $module = $name, + $ensure = 'present', +) { + + include ::kmod + + ensure_resource('file', $file, { 'ensure' => 'file'} ) + case $ensure { + 'present': { + if $option { + $changes = [ + "set ${category}[. = '${module}'] ${module}", + "set ${category}[. = '${module}']/${option} ${value}", + ] + } else { + $changes = [ + "set ${category}[. = '${module}'] ${module}", + ] + } + } + + 'absent': { + $changes = "rm ${category}[. = '${module}']" + } + + default: { fail ( "unknown ensure value ${ensure}" ) } + } + + augeas { "kmod::setting ${title} ${module}": + incl => $file, + lens => 'Modprobe.lns', + changes => $changes, + require => File[$file], + } +} diff --git a/kmod/metadata.json b/kmod/metadata.json new file mode 100644 index 000000000..25cb4b183 --- /dev/null +++ b/kmod/metadata.json @@ -0,0 +1,44 @@ +{ + "name": "camptocamp-kmod", + "version": "2.1.0", + "author": "camptocamp", + "summary": "Manage Linux kernel modules with Puppet", + "license": "Apache-2.0", + "source": "https://github.com/camptocamp/puppet-kmod", + "project_page": "https://github.com/camptocamp/puppet-kmod", + "issues_url": "https://github.com/camptocamp/puppet-kmod/issues", + "description": "Manage Linux kernel modules with Puppet", + "dependencies": [ + + ], + "operatingsystem_support": [ + { + "operatingsystem": "Debian", + "operatingsystemrelease": [ + "6", + "7" + ] + }, + { + "operatingsystem": "Ubuntu", + "operatingsystemrelease": [ + "12.04", + "14.04" + ] + }, + { + "operatingsystem": "SLES", + "operatingsystemrelease": [ + "11" + ] + }, + { + "operatingsystem": "RedHat", + "operatingsystemrelease": [ + "5", + "6", + "7" + ] + } + ] +} diff --git a/kmod/spec/acceptance/nodesets/centos-5-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/centos-5-x86_64-docker.yml new file mode 100644 index 000000000..679afb04d --- /dev/null +++ b/kmod/spec/acceptance/nodesets/centos-5-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + centos-5-x64: + default_apply_opts: + order: random + strict_variables: + platform: el-5-x86_64 + hypervisor : docker + image: centos:5 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'yum install -y crontabs tar wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/centos-6-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/centos-6-x86_64-docker.yml new file mode 100644 index 000000000..9cab03d08 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/centos-6-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + centos-6-x64: + default_apply_opts: + order: random + strict_variables: + platform: el-6-x86_64 + hypervisor : docker + image: centos:6 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'yum install -y crontabs tar wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/centos-6-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/centos-6-x86_64-openstack.yml new file mode 100644 index 
000000000..e325b9e90 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/centos-6-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + centos-6-x64: + default_apply_opts: + order: random + strict_variables: + platform: el-6-x86_64 + hypervisor : openstack + flavor: m1.small + image: centos-6-latest + user: root +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/centos-6-x86_64-vagrant.yml b/kmod/spec/acceptance/nodesets/centos-6-x86_64-vagrant.yml new file mode 100644 index 000000000..f06036ecc --- /dev/null +++ b/kmod/spec/acceptance/nodesets/centos-6-x86_64-vagrant.yml @@ -0,0 +1,11 @@ +HOSTS: + centos-6-x64: + default_apply_opts: + order: random + strict_variables: + platform: el-6-x86_64 + hypervisor : vagrant + box : camptocamp/centos-6-x86_64 +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/centos-7-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/centos-7-x86_64-docker.yml new file mode 100644 index 000000000..0bc972711 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/centos-7-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + centos-7-x64: + default_apply_opts: + order: random + strict_variables: + platform: el-7-x86_64 + hypervisor : docker + image: centos:7 + docker_preserve_image: true + docker_cmd: '["/usr/sbin/init"]' + docker_image_commands: + - 'yum install -y crontabs tar wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/centos-7-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/centos-7-x86_64-openstack.yml new file mode 100644 index 000000000..9003c8678 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/centos-7-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + centos-7-x64: + default_apply_opts: + order: random + strict_variables: + platform: el-7-x86_64 + hypervisor : openstack + flavor: m1.small + image: centos-7-latest + user: centos +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/centos-7-x86_64-vagrant.yml b/kmod/spec/acceptance/nodesets/centos-7-x86_64-vagrant.yml new file mode 100644 index 000000000..95402e546 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/centos-7-x86_64-vagrant.yml @@ -0,0 +1,11 @@ +HOSTS: + centos-7-x64: + default_apply_opts: + order: random + strict_variables: + platform: el-7-x86_64 + hypervisor : vagrant + box : camptocamp/centos-7-x86_64 +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/debian-6-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/debian-6-x86_64-docker.yml new file mode 100644 index 000000000..359dae7dc --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-6-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + debian-6-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-6-amd64 + hypervisor : docker + image: debian:6 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'apt-get install -y wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/debian-6-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/debian-6-x86_64-openstack.yml new file mode 100644 index 000000000..c6c192fe4 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-6-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + debian-6-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-6-amd64 + hypervisor : openstack + flavor: m1.small + image: debian-6-latest + user: debian +CONFIG: + type: foss + log_level: debug + 
openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/debian-6-x86_64-vagrant.yml b/kmod/spec/acceptance/nodesets/debian-6-x86_64-vagrant.yml new file mode 100644 index 000000000..03db0fa76 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-6-x86_64-vagrant.yml @@ -0,0 +1,11 @@ +HOSTS: + debian-6-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-6-amd64 + hypervisor : vagrant + box : puppetlabs/debian-6.0.10-64-nocm +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/debian-7-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/debian-7-x86_64-docker.yml new file mode 100644 index 000000000..fc11f5748 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-7-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + debian-7-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-7-amd64 + hypervisor : docker + image: debian:7 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'apt-get install -y cron wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/debian-7-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/debian-7-x86_64-openstack.yml new file mode 100644 index 000000000..017b4c743 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-7-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + debian-7-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-7-amd64 + hypervisor : openstack + flavor: m1.small + image: debian-7-latest + user: debian +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/debian-7-x86_64-vagrant.yml b/kmod/spec/acceptance/nodesets/debian-7-x86_64-vagrant.yml new file mode 100644 index 000000000..8ed1264df --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-7-x86_64-vagrant.yml @@ -0,0 +1,11 @@ +HOSTS: + debian-7-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-7-amd64 + hypervisor : vagrant + box : camptocamp/debian-7-amd64 +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/debian-8-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/debian-8-x86_64-docker.yml new file mode 100644 index 000000000..86a55e156 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-8-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + debian-8-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-8-amd64 + hypervisor : docker + image: debian:8 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'apt-get install -y cron wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/debian-8-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/debian-8-x86_64-openstack.yml new file mode 100644 index 000000000..003b6f4bd --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-8-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + debian-8-x64: + default_apply_opts: + order: random + strict_variables: + platform: debian-8-amd64 + hypervisor : openstack + flavor: m1.small + image: debian-8-latest + user: debian +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/debian-8-x86_64-vagrant.yml b/kmod/spec/acceptance/nodesets/debian-8-x86_64-vagrant.yml new file mode 100644 index 000000000..5cc7f0c5a --- /dev/null +++ b/kmod/spec/acceptance/nodesets/debian-8-x86_64-vagrant.yml @@ -0,0 +1,11 @@ +HOSTS: + debian-8-x64: + 
default_apply_opts: + order: random + strict_variables: + platform: debian-8-amd64 + hypervisor : vagrant + box : camptocamp/debian-8-amd64 +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/ubuntu-10.04-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/ubuntu-10.04-x86_64-docker.yml new file mode 100644 index 000000000..933dee605 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-10.04-x86_64-docker.yml @@ -0,0 +1,13 @@ +HOSTS: + ubuntu-1004-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-10.04-amd64 + hypervisor : docker + image: ubuntu:10.04 + # This stops the image from being deleted on completion, speeding up the process. + docker_preserve_image: true +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/ubuntu-12.04-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/ubuntu-12.04-x86_64-docker.yml new file mode 100644 index 000000000..f0ec72b86 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-12.04-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + ubuntu-1204-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-12.04-amd64 + hypervisor : docker + image: ubuntu:12.04 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'apt-get install -y wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/ubuntu-12.04-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/ubuntu-12.04-x86_64-openstack.yml new file mode 100644 index 000000000..f81b04b74 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-12.04-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + ubuntu-1204-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-12.04-amd64 + hypervisor : openstack + flavor: m1.small + image: ubuntu-1204-latest + user: ubuntu +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-docker.yml new file mode 100644 index 000000000..6fb9281e5 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + ubuntu-1404-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-14.04-amd64 + hypervisor : docker + image: ubuntu:14.04 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'apt-get install -y wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-openstack.yml new file mode 100644 index 000000000..2eeb912d9 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + ubuntu-1404-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-14.04-amd64 + hypervisor : openstack + flavor: m1.small + image: ubuntu-1404-latest + user: ubuntu +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-vagrant.yml b/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-vagrant.yml new file mode 100644 index 000000000..3b3769539 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-14.04-x86_64-vagrant.yml @@ -0,0 +1,11 @@ +HOSTS: + ubuntu-1404-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-14.04-amd64 + hypervisor : vagrant + box : 
puppetlabs/ubuntu-14.04-64-nocm +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/ubuntu-14.10-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/ubuntu-14.10-x86_64-docker.yml new file mode 100644 index 000000000..2be425c54 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-14.10-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + ubuntu-1410-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-14.10-amd64 + hypervisor : docker + image: ubuntu:14.10 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'apt-get install -y wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/ubuntu-14.10-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/ubuntu-14.10-x86_64-openstack.yml new file mode 100644 index 000000000..58a2acd26 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-14.10-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + ubuntu-1410-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-14.10-amd64 + hypervisor : openstack + flavor: m1.small + image: ubuntu-1410-latest + user: ubuntu +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/acceptance/nodesets/ubuntu-15.04-x86_64-docker.yml b/kmod/spec/acceptance/nodesets/ubuntu-15.04-x86_64-docker.yml new file mode 100644 index 000000000..caed722c2 --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-15.04-x86_64-docker.yml @@ -0,0 +1,15 @@ +HOSTS: + ubuntu-1504-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-15.04-amd64 + hypervisor : docker + image: ubuntu:15.04 + docker_preserve_image: true + docker_cmd: '["/sbin/init"]' + docker_image_commands: + - 'apt-get install -y wget' +CONFIG: + type: foss + log_level: debug diff --git a/kmod/spec/acceptance/nodesets/ubuntu-15.04-x86_64-openstack.yml b/kmod/spec/acceptance/nodesets/ubuntu-15.04-x86_64-openstack.yml new file mode 100644 index 000000000..22ef76c4e --- /dev/null +++ b/kmod/spec/acceptance/nodesets/ubuntu-15.04-x86_64-openstack.yml @@ -0,0 +1,14 @@ +HOSTS: + ubuntu-1504-x64: + default_apply_opts: + order: random + strict_variables: + platform: ubuntu-15.04-amd64 + hypervisor : openstack + flavor: m1.small + image: ubuntu-1504-latest + user: ubuntu +CONFIG: + type: foss + log_level: debug + openstack_network: default diff --git a/kmod/spec/classes/kmod_spec.rb b/kmod/spec/classes/kmod_spec.rb new file mode 100644 index 000000000..f37f8c096 --- /dev/null +++ b/kmod/spec/classes/kmod_spec.rb @@ -0,0 +1,26 @@ +require 'spec_helper' + +describe 'kmod', :type => :class do + + on_supported_os.each do |os, facts| + context "on #{os} with augeas 0.8.9" do + let(:facts) do facts.merge({:augeasversion => '0.8.9'}) end + it do + expect { + should compile + }.to raise_error(/Augeas 0.10.0 or higher required/) + end + end + context "on #{os}" do + let(:facts) do + facts.merge( { :augeasversion => '1.2.0' } ) + end + + it { should contain_class('kmod') } + it { should contain_file('/etc/modprobe.d').with({ 'ensure' => 'directory' }) } + ['modprobe.conf','aliases.conf','blacklist.conf'].each do |file| + it { should contain_file("/etc/modprobe.d/#{file}").with({ 'ensure' => 'file' }) } + end + end + end +end diff --git a/kmod/spec/defines/kmod_alias_spec.rb b/kmod/spec/defines/kmod_alias_spec.rb new file mode 100644 index 000000000..298009e40 --- /dev/null +++ b/kmod/spec/defines/kmod_alias_spec.rb @@ -0,0 +1,42 @@ +require 'spec_helper' + +describe 
'kmod::alias', :type => :define do + let(:title) { 'foo' } + + on_supported_os.each do |os, facts| + context "on #{os}" do + let(:facts) do + facts.merge( {:augeasversion => '1.2.0'} ) + end + + let(:default_params) do { :source =>'bar', :file => '/baz' } end + + context 'when a file is specified' do + let(:params) do default_params end + it { should contain_kmod__alias('foo') } + it { should contain_kmod__setting('kmod::alias foo') .with({ + 'ensure' => 'present', + 'module' => 'foo', + 'file' => '/baz', + 'category' => 'alias', + 'option' => 'modulename', + 'value' => 'bar' + }) } + end + + context 'when a file is specified and an aliasname' do + let(:params) do default_params.merge!({ :aliasname => 'tango' }) end + it { should contain_kmod__alias('foo') } + it { should contain_kmod__setting('kmod::alias foo') .with({ + 'ensure' => 'present', + 'module' => 'tango', + 'file' => '/baz', + 'category' => 'alias', + 'option' => 'modulename', + 'value' => 'bar' + }) } + end + + end + end +end diff --git a/kmod/spec/defines/kmod_blacklist_spec.rb b/kmod/spec/defines/kmod_blacklist_spec.rb new file mode 100644 index 000000000..908eebd65 --- /dev/null +++ b/kmod/spec/defines/kmod_blacklist_spec.rb @@ -0,0 +1,49 @@ +require 'spec_helper' + +describe 'kmod::blacklist', :type => :define do + let(:title) { 'foo' } + + on_supported_os.each do |os, facts| + context "on #{os}" do + let(:facts) do + facts.merge({ + :augeasversion => '1.2.0', + }) + end + + context 'when ensure is set to present' do + let(:params) do { :ensure => 'present', :file => '/bar/baz' } end + it { should contain_kmod__blacklist('foo') } + it { should contain_kmod__setting('kmod::blacklist foo').with({ + 'ensure' => 'present', + 'category' => 'blacklist', + 'module' => 'foo', + 'file' => '/bar/baz' + }) } + end + + + context 'when file is not specified' do + let(:params) do { :ensure => 'present' } end + it { should contain_kmod__blacklist('foo') } + it { should contain_kmod__setting('kmod::blacklist foo').with({ + 'ensure' => 'present', + 'category' => 'blacklist', + 'module' => 'foo', + 'file' => '/etc/modprobe.d/blacklist.conf' + }) } + end + + context 'when ensure is set to absent' do + let(:params) do { :ensure => 'absent', :file => '/bar/baz' } end + it { should contain_kmod__blacklist('foo') } + it { should contain_kmod__setting('kmod::blacklist foo').with({ + 'ensure' => 'absent', + 'category' => 'blacklist', + 'module' => 'foo', + 'file' => '/bar/baz' + }) } + end + end + end +end diff --git a/kmod/spec/defines/kmod_install_spec.rb b/kmod/spec/defines/kmod_install_spec.rb new file mode 100644 index 000000000..a8937ccf9 --- /dev/null +++ b/kmod/spec/defines/kmod_install_spec.rb @@ -0,0 +1,26 @@ +require 'spec_helper' + +describe 'kmod::install', :type => :define do + let(:title) { 'foo' } + + on_supported_os.each do |os, facts| + context "on #{os}" do + let(:facts) do + facts.merge({ + :augeasversion => '1.2.0', + }) + end + + let(:params) do { :ensure => 'present', :command => '/bin/true', :file => '/etc/modprobe.d/modprobe.conf' } end + it { should contain_kmod__install('foo') } + it { should contain_kmod__setting('kmod::install foo').with({ + 'ensure' => 'present', + 'category' => 'install', + 'module' => 'foo', + 'option' => 'command', + 'value' => '/bin/true', + 'file' => '/etc/modprobe.d/modprobe.conf' + }) } + end + end +end diff --git a/kmod/spec/defines/kmod_load_spec.rb b/kmod/spec/defines/kmod_load_spec.rb new file mode 100644 index 000000000..0e124b23e --- /dev/null +++ b/kmod/spec/defines/kmod_load_spec.rb @@ 
-0,0 +1,67 @@ +require 'spec_helper' + +describe 'kmod::load', :type => :define do + let(:title) { 'foo' } + + on_supported_os.each do |os, facts| + context "on #{os}" do + let(:facts) do + facts.merge( { :augeasversion => '1.2.0' } ) + end + + context 'with ensure set to present' do + let(:params) do { :ensure => 'present', :file => '/foo/bar' } end + it { should contain_kmod__load('foo') } + it { should contain_exec('modprobe foo').with({'unless' => "egrep -q '^foo ' /proc/modules"}) } + + case facts[:osfamily] + when 'Debian' + it { should contain_augeas('Manage foo in /foo/bar').with({ + 'incl' => '/foo/bar', + 'lens' => 'Modules.lns', + 'changes' => "clear 'foo'" + }) } + when 'Suse' + it { should contain_augeas('sysconfig_kernel_MODULES_LOADED_ON_BOOT_foo').with({ + 'incl' => '/foo/bar', + 'lens' => 'Shellvars_list.lns', + 'changes' => "set MODULES_LOADED_ON_BOOT/value[.='foo'] 'foo'" + }) } + when 'RedHat' + it { should contain_file('/etc/sysconfig/modules/foo.modules').with({ + 'ensure' => 'present', + 'mode' => '0755', + 'content' => /exec \/sbin\/modprobe foo > \/dev\/null 2>&1/ + })} + end + end + + context 'with ensure set to absent' do + let(:params) do { :ensure => 'absent', :file => '/foo/bar' } end + it { should contain_kmod__load('foo') } + it { should contain_exec('modprobe -r foo').with({ 'onlyif' => "egrep -q '^foo ' /proc/modules" }) } + + case facts[:osfamily] + when 'Debian' + it { should contain_augeas('Manage foo in /foo/bar').with({ + 'incl' => '/foo/bar', + 'lens' => 'Modules.lns', + 'changes' => "rm 'foo'" + })} + when 'Suse' + it { should contain_augeas('sysconfig_kernel_MODULES_LOADED_ON_BOOT_foo').with({ + 'incl' => '/foo/bar', + 'lens' => 'Shellvars_list.lns', + 'changes' => "rm MODULES_LOADED_ON_BOOT/value[.='foo']" + }) } + when 'RedHat' + it { should contain_file('/etc/sysconfig/modules/foo.modules').with({ + 'ensure' => 'absent', + 'mode' => '0755', + 'content' => /exec \/sbin\/modprobe foo > \/dev\/null 2>&1/ + })} + end + end + end + end +end diff --git a/kmod/spec/defines/kmod_setting_spec.rb b/kmod/spec/defines/kmod_setting_spec.rb new file mode 100644 index 000000000..5f3673427 --- /dev/null +++ b/kmod/spec/defines/kmod_setting_spec.rb @@ -0,0 +1,40 @@ +require 'spec_helper' + +describe 'kmod::setting', :type => :define do + let(:title) { 'foo' } + + on_supported_os.each do |os, facts| + context "on #{os}" do + let(:facts) do + facts.merge({ + :augeasversion => '1.2.0', + }) + end + + let(:default_params) do { :file => 'modprobe.conf' } end + let(:params) do default_params end + + context 'add an alias' do + let(:params) do default_params.merge({ :category => 'alias', :option => 'modulename', :value => 'tango' }) end + it { should contain_kmod__setting('foo')} + it { should contain_augeas('kmod::setting foo foo').with({ + 'incl' => 'modprobe.conf', + 'lens' => 'Modprobe.lns', + 'changes' => [ "set alias[. = 'foo'] foo", "set alias[. = 'foo']/modulename tango" ], + 'require' => 'File[modprobe.conf]' + })} + end + context 'add a blacklist' do + let(:params) do { :file => '/etc/modprobe.d/blacklist.conf', :category => 'blacklist' } end + it { should contain_kmod__setting('foo')} + it { should contain_augeas('kmod::setting foo foo').with({ + 'incl' => '/etc/modprobe.d/blacklist.conf', + 'lens' => 'Modprobe.lns', + 'changes' => [ "set blacklist[. 
= 'foo'] foo" ], + 'require' => 'File[/etc/modprobe.d/blacklist.conf]' + })} + end + + end + end +end diff --git a/kmod/spec/spec.opts b/kmod/spec/spec.opts new file mode 100644 index 000000000..91cd6427e --- /dev/null +++ b/kmod/spec/spec.opts @@ -0,0 +1,6 @@ +--format +s +--colour +--loadby +mtime +--backtrace diff --git a/kmod/spec/spec_helper.rb b/kmod/spec/spec_helper.rb new file mode 100644 index 000000000..94d30d5ce --- /dev/null +++ b/kmod/spec/spec_helper.rb @@ -0,0 +1,42 @@ +require 'puppetlabs_spec_helper/module_spec_helper' +require 'rspec-puppet-facts' +include RspecPuppetFacts + + +RSpec.configure do |c| + c.include PuppetlabsSpec::Files + + c.before :each do + # Store any environment variables away to be restored later + @old_env = {} + ENV.each_key {|k| @old_env[k] = ENV[k]} + + c.strict_variables = Gem::Version.new(Puppet.version) >= Gem::Version.new('3.5') + Puppet.features.stubs(:root?).returns(true) + end + + c.after :each do + PuppetlabsSpec::Files.cleanup + end +end + +require 'pathname' +dir = Pathname.new(__FILE__).parent +Puppet[:modulepath] = File.join(dir, 'fixtures', 'modules') + +# There's no real need to make this version dependent, but it helps find +# regressions in Puppet +# +# 1. Workaround for issue #16277 where default settings aren't initialised from +# a spec and so the libdir is never initialised (3.0.x) +# 2. Workaround for 2.7.20 that now only loads types for the current node +# environment (#13858) so Puppet[:modulepath] seems to get ignored +# 3. Workaround for 3.5 where context hasn't been configured yet, +# ticket https://tickets.puppetlabs.com/browse/MODULES-823 +# +ver = Gem::Version.new(Puppet.version.split('-').first) +if Gem::Requirement.new("~> 2.7.20") =~ ver || Gem::Requirement.new("~> 3.0.0") =~ ver || Gem::Requirement.new("~> 3.5") =~ ver || Gem::Requirement.new("~> 4.0") + puts "augeasproviders: setting Puppet[:libdir] to work around broken type autoloading" + # libdir is only a single dir, so it can only workaround loading of one external module + Puppet[:libdir] = "#{Puppet[:modulepath]}/augeasproviders_core/lib" +end diff --git a/kmod/templates/redhat.modprobe.erb b/kmod/templates/redhat.modprobe.erb new file mode 100644 index 000000000..389df3418 --- /dev/null +++ b/kmod/templates/redhat.modprobe.erb @@ -0,0 +1,5 @@ +#!/bin/sh + +# file managed by puppet + +exec /sbin/modprobe <%= @name %> > /dev/null 2>&1 diff --git a/manila/CHANGELOG.md b/manila/CHANGELOG.md new file mode 100644 index 000000000..5cc2dcc87 --- /dev/null +++ b/manila/CHANGELOG.md @@ -0,0 +1,4 @@ +##2015-07-08 - 6.0.0 +###Summary + +- Initial release of the puppet-manila module diff --git a/manila/README.md b/manila/README.md index b352daf4e..56df97022 100644 --- a/manila/README.md +++ b/manila/README.md @@ -1,7 +1,7 @@ manila ======= -1.0.0 - 2014.2.0 - Juno +6.0.0 - 2015.1.0 - Kilo #### Table of Contents diff --git a/manila/manifests/init.pp b/manila/manifests/init.pp index 9725ca59b..3ab7172c4 100644 --- a/manila/manifests/init.pp +++ b/manila/manifests/init.pp @@ -279,8 +279,8 @@ 'oslo_messaging_rabbit/rabbit_userid': value => $rabbit_userid; 'oslo_messaging_rabbit/rabbit_virtual_host': value => $rabbit_virtual_host; 'oslo_messaging_rabbit/rabbit_use_ssl': value => $rabbit_use_ssl; - 'DEFAULT/control_exchange': value => $control_exchange; - 'DEFAULT/amqp_durable_queues': value => $amqp_durable_queues; + 'DEFAULT/control_exchange': value => $control_exchange; + 'oslo_messaging_rabbit/amqp_durable_queues': value => $amqp_durable_queues; } if $rabbit_hosts { diff 
--git a/manila/metadata.json b/manila/metadata.json index 8e3fbb96a..b930a28b4 100644 --- a/manila/metadata.json +++ b/manila/metadata.json @@ -1,10 +1,10 @@ { - "name": "stackforge-manila", - "version": "5.0.0", - "author": "NetApp and StackForge Contributors", + "name": "openstack-manila", + "version": "6.0.0", + "author": "NetApp and OpenStack Contributors", "summary": "Puppet module for OpenStack Manila", "license": "Apache-2.0", - "source": "git://github.com/stackforge/puppet-manila.git", + "source": "git://github.com/openstack/puppet-manila.git", "project_page": "https://launchpad.net/puppet-manila", "issues_url": "https://bugs.launchpad.net/puppet-manila", "requirements": [ @@ -33,10 +33,10 @@ "dependencies": [ { "name": "dprince/qpid", "version_requirement": ">=1.0.0 <2.0.0" }, { "name": "puppetlabs/inifile", "version_requirement": ">=1.0.0 <2.0.0" }, - { "name": "stackforge/keystone", "version_requirement": ">=5.0.0 <6.0.0" }, - { "name": "stackforge/glance", "version_requirement": ">=5.0.0 <6.0.0" }, + { "name": "openstack/keystone", "version_requirement": ">=6.0.0 <7.0.0" }, + { "name": "openstack/glance", "version_requirement": ">=6.0.0 <7.0.0" }, { "name": "puppetlabs/rabbitmq", "version_requirement": ">=2.0.2 <6.0.0" }, { "name": "puppetlabs/stdlib", "version_requirement": ">=4.0.0 <5.0.0" }, - { "name": "stackforge/openstacklib", "version_requirement": ">=5.0.0 <6.0.0" } + { "name": "openstack/openstacklib", "version_requirement": ">=6.0.0 <7.0.0" } ] } diff --git a/manila/spec/classes/manila_spec.rb b/manila/spec/classes/manila_spec.rb index 4fe4ca38f..6d2fef593 100644 --- a/manila/spec/classes/manila_spec.rb +++ b/manila/spec/classes/manila_spec.rb @@ -287,7 +287,7 @@ req_params end - it { is_expected.to contain_manila_config('DEFAULT/amqp_durable_queues').with_value(false) } + it { is_expected.to contain_manila_config('oslo_messaging_rabbit/amqp_durable_queues').with_value(false) } end describe 'with amqp_durable_queues enabled' do @@ -297,7 +297,7 @@ }) end - it { is_expected.to contain_manila_config('DEFAULT/amqp_durable_queues').with_value(true) } + it { is_expected.to contain_manila_config('oslo_messaging_rabbit/amqp_durable_queues').with_value(true) } end describe 'with sqlite' do diff --git a/manila/spec/spec_helper_acceptance.rb b/manila/spec/spec_helper_acceptance.rb index 429e807c4..144b31e3f 100644 --- a/manila/spec/spec_helper_acceptance.rb +++ b/manila/spec/spec_helper_acceptance.rb @@ -38,7 +38,7 @@ zuul_clone_cmd += "git://git.openstack.org #{repo}" on host, zuul_clone_cmd else - on host, "git clone https://git.openstack.org/#{repo} #{repo}" + on host, "git clone -b stable/kilo https://git.openstack.org/#{repo} #{repo}" end on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" diff --git a/mongodb/lib/puppet/provider/mongodb.rb b/mongodb/lib/puppet/provider/mongodb.rb index 101f3fd6a..c0670769f 100644 --- a/mongodb/lib/puppet/provider/mongodb.rb +++ b/mongodb/lib/puppet/provider/mongodb.rb @@ -80,7 +80,11 @@ def self.mongo_eval(cmd, db = 'admin') cmd = mongorc_file + cmd end - out = mongo([db, '--quiet', '--host', get_conn_string, '--eval', cmd]) + if ipv6_is_enabled + out = mongo([db, '--quiet', '--ipv6', '--host', get_conn_string, '--eval', cmd]) + else + out = mongo([db, '--quiet', '--host', get_conn_string, '--eval', cmd]) + end out.gsub!(/ObjectId\(([^)]*)\)/, '\1') out diff --git a/mongodb/lib/puppet/provider/mongodb_replset/mongo.rb b/mongodb/lib/puppet/provider/mongodb_replset/mongo.rb index 
ca3fcf845..0b5c0347d 100644 --- a/mongodb/lib/puppet/provider/mongodb_replset/mongo.rb +++ b/mongodb/lib/puppet/provider/mongodb_replset/mongo.rb @@ -113,6 +113,22 @@ def self.get_mongod_conf_file file end + def self.ipv6_is_enabled + file = get_mongod_conf_file + config = YAML.load_file(file) + if config.kind_of?(Hash) + ipv6 = config['net.ipv6'] + else # It has to be a key-value store + config = {} + File.readlines(file).collect do |line| + k,v = line.split('=') + config[k.rstrip] = v.lstrip.chomp if k and v + end + ipv6 = config['ipv6'] + end + ipv6 + end + def self.get_replset_properties conn_string = get_conn_string @@ -229,11 +245,13 @@ def mongo_command(command, host, retries=4) end def self.mongo_command(command, host=nil, retries=4) + has_ipv6 = ipv6_is_enabled # Allow waiting for mongod to become ready # Wait for 2 seconds initially and double the delay at each retry wait = 2 begin args = Array.new + args << '--ipv6' if has_ipv6 args << '--quiet' args << ['--host',host] if host args << ['--eval',"printjson(#{command})"] diff --git a/n1k_vsm/.fixtures.yml b/n1k_vsm/.fixtures.yml new file mode 100644 index 000000000..d54b7e67d --- /dev/null +++ b/n1k_vsm/.fixtures.yml @@ -0,0 +1,5 @@ +fixtures: + repositories: + 'stdlib': 'git://github.com/puppetlabs/puppetlabs-stdlib.git' + symlinks: + "n1k_vsm": "#{source_dir}" diff --git a/n1k_vsm/Gemfile b/n1k_vsm/Gemfile index 8de97162d..6d4ce9a07 100644 --- a/n1k_vsm/Gemfile +++ b/n1k_vsm/Gemfile @@ -1,8 +1,30 @@ -source 'https://rubygems.org' +source ENV['GEM_SOURCE'] || "https://rubygems.org" group :development, :test do - gem 'puppetlabs_spec_helper', :require => false - gem 'puppet-lint', '~> 0.3.2' + gem 'puppetlabs_spec_helper', :require => 'false' + gem 'rspec-puppet', '~> 2.2.0', :require => 'false' + gem 'metadata-json-lint', :require => 'false' + gem 'puppet-lint-param-docs', :require => 'false' + gem 'puppet-lint-absolute_classname-check', :require => 'false' + gem 'puppet-lint-absolute_template_path', :require => 'false' + gem 'puppet-lint-trailing_newline-check', :require => 'false' + gem 'puppet-lint-unquoted_string-check', :require => 'false' + gem 'puppet-lint-leading_zero-check', :require => 'false' + gem 'puppet-lint-variable_contains_upcase', :require => 'false' + gem 'puppet-lint-numericvariable', :require => 'false' + gem 'json', :require => 'false' + gem 'webmock', :require => 'false' +end + +group :system_tests do + gem 'beaker-rspec', :require => 'false' + gem 'beaker-puppet_install_helper', :require => 'false' +end + +if facterversion = ENV['FACTER_GEM_VERSION'] + gem 'facter', facterversion, :require => false +else + gem 'facter', :require => false end if puppetversion = ENV['PUPPET_GEM_VERSION'] @@ -10,3 +32,5 @@ if puppetversion = ENV['PUPPET_GEM_VERSION'] else gem 'puppet', :require => false end + +# vim:ft=ruby diff --git a/n1k_vsm/manifests/deploy.pp b/n1k_vsm/manifests/deploy.pp index b39afe6cb..4427b264c 100644 --- a/n1k_vsm/manifests/deploy.pp +++ b/n1k_vsm/manifests/deploy.pp @@ -5,14 +5,31 @@ # class n1k_vsm::deploy { + require ::n1k_vsm + include ::n1k_vsm + #ensure tap interfaces and deploy the vsm $ctrltap = 'vsm-ctrl0' $mgmttap = 'vsm-mgmt0' $pkttap = 'vsm-pkt0' + # Validate and get the array of digits for the vsm_mac_base (or use default) + # Using _vmb as the name for the final string to increase readability + $tmp_mac_base = regsubst($n1k_vsm::vsm_mac_base, '[^0-9a-fA-F]+', '') + if size($tmp_mac_base) < 7 { + $vmb = split('005dc79', '') + } else { + $vmb = split($tmp_mac_base, '') + } + + # Generate 
MACs for VSM + $ctrlmac = "52:54:${vmb[0]}${vmb[1]}:${vmb[2]}${vmb[3]}:${vmb[4]}${vmb[5]}:${vmb[6]}1" + $mgmtmac = "52:54:${vmb[0]}${vmb[1]}:${vmb[2]}${vmb[3]}:${vmb[4]}${vmb[5]}:${vmb[6]}2" + $pktmac = "52:54:${vmb[0]}${vmb[1]}:${vmb[2]}${vmb[3]}:${vmb[4]}${vmb[5]}:${vmb[6]}3" + exec { 'Exec_create_disk': command => "/usr/bin/qemu-img create -f raw ${n1k_vsm::diskfile} ${n1k_vsm::disksize}G", - unless => "/usr/bin/virsh list --all | grep -c ${n1k_vsm::vsmname}", + creates => $n1k_vsm::diskfile, } $targetxmlfile = "/var/spool/cisco/vsm/vsm_${n1k_vsm::vsm_role}_deploy.xml" @@ -21,19 +38,49 @@ owner => 'root', group => 'root', mode => '0666', + seltype => 'virt_content_t', content => template('n1k_vsm/vsm_vm.xml.erb'), require => Exec['Exec_create_disk'], } - exec { 'Exec_Define_VSM': - command => "/usr/bin/virsh define ${targetxmlfile}", - unless => "/usr/bin/virsh list --all | grep -c ${n1k_vsm::vsmname}", - } + # Don't start VSM if this is pacemaker controlled deployment + if !($n1k_vsm::pacemaker_control) { + exec { 'Exec_Define_VSM': + command => "/usr/bin/virsh define ${targetxmlfile}", + unless => "/usr/bin/virsh list --all | grep -c ${n1k_vsm::vsmname}", + require => File['File_Target_XML_File'], + } - exec { 'Exec_Launch_VSM': - command => "/usr/bin/virsh start ${n1k_vsm::vsmname}", - unless => "/usr/bin/virsh list --all | grep ${n1k_vsm::vsmname} | grep -c running", - } + exec { 'Exec_Launch_VSM': + command => "/usr/bin/virsh start ${n1k_vsm::vsmname}", + unless => ("/usr/bin/virsh list --all | grep ${n1k_vsm::vsmname} | grep -c running"), + require => Exec['Exec_Define_VSM'], + } + } else { + # For pacemker controlled deployment, set up the secondary VSM as well + # ensure tap interfaces and deploy the vsm + $ctrltap_s = 'vsm-ctrl1' + $mgmttap_s = 'vsm-mgmt1' + $pkttap_s = 'vsm-pkt1' + # Generate MACs + $ctrlmac_s = "52:54:${vmb[0]}${vmb[1]}:${vmb[2]}${vmb[3]}:${vmb[4]}${vmb[5]}:${vmb[6]}4" + $mgmtmac_s = "52:54:${vmb[0]}${vmb[1]}:${vmb[2]}${vmb[3]}:${vmb[4]}${vmb[5]}:${vmb[6]}5" + $pktmac_s = "52:54:${vmb[0]}${vmb[1]}:${vmb[2]}${vmb[3]}:${vmb[4]}${vmb[5]}:${vmb[6]}6" - Exec['Exec_create_disk'] -> File['File_Target_XML_File'] -> Exec['Exec_Define_VSM'] -> Exec['Exec_Launch_VSM'] + exec { 'Exec_create_disk_Secondary': + command => "/usr/bin/qemu-img create -f raw ${n1k_vsm::diskfile_s} ${n1k_vsm::disksize}G", + creates => $n1k_vsm::diskfile_s, + } + + $targetxmlfile_s = "/var/spool/cisco/vsm/vsm_${n1k_vsm::vsm_role_s}_deploy.xml" + file { 'File_Target_XML_File_Secondary': + path => $targetxmlfile_s, + owner => 'root', + group => 'root', + mode => '0666', + seltype => 'virt_content_t', + content => template('n1k_vsm/vsm_vm_secondary.xml.erb'), + require => Exec['Exec_create_disk_Secondary'], + } + } } diff --git a/n1k_vsm/manifests/init.pp b/n1k_vsm/manifests/init.pp index a9ee4e83c..ddcd42d00 100644 --- a/n1k_vsm/manifests/init.pp +++ b/n1k_vsm/manifests/init.pp @@ -36,6 +36,24 @@ # [*n1kv_version*] # (required) Version of the Nexus1000v VSM # +# [*pacemaker_control*] +# (optional) Set to determine if pacemaker will control the VSM. If true will deploy both +# primary and secondary VSMs on all nodes and will not start VSM. Defaults to false and +# thus is optional unless this functionality is being used. +# +# [*existing_bridge*] +# (required) If VSM should be installed behind an existing bridge, this should be set to +# true and the bridge name should be provided in phy_if_bridge. 
+# +# [*vsm_mac_base*] +# (optional) If set, provides randomization for the MAC addresses for the VSM VM(s). +# Should be a (random) hexadecimal number of at least 7 digits (more is fine). +# +# [*phy_bridge_vlan*] +# (optional) In the case that the management interface is a bridge with a tagged +# uplink port, the VLAN tag for that uplink port can be provided which will +# be applied on the patch port connecting vsm-br and the management bridge. +# class n1k_vsm( $n1kv_source = '', $n1kv_version = 'latest', @@ -47,6 +65,10 @@ $vsm_mgmt_ip, $vsm_mgmt_netmask, $vsm_mgmt_gateway, + $pacemaker_control = false, + $existing_bridge = false, + $vsm_mac_base = '', + $phy_bridge_vlan = 0, ) { if($::osfamily != 'Redhat') { @@ -55,6 +77,15 @@ fail("Unsupported osfamily ${::osfamily}") } + # Ensure role is set to primary for pacemaker controlled deployment + # Additionally setup the extra variables for the secondary VSM + if ($n1k_vsm::pacemaker_control) { + $vsm_role_s = 'secondary' + $vsmname_s = 'vsm-s' + $imgfile_s = "/var/spool/cisco/vsm/${vsm_role_s}_repacked.iso" + $diskfile_s = "/var/spool/cisco/vsm/${vsm_role_s}_disk" + } + if ($n1k_vsm::vsm_role == 'primary') or ($n1k_vsm::vsm_role == 'standalone') { $vsmname = 'vsm-p' $mgmtip = $vsm_mgmt_ip @@ -73,23 +104,22 @@ $disksize = 4 $imgfile = "/var/spool/cisco/vsm/${n1k_vsm::vsm_role}_repacked.iso" $diskfile = "/var/spool/cisco/vsm/${n1k_vsm::vsm_role}_disk" - $ovsbridge = 'vsm-br' + + #Set bridge name properly + $ovsbridge = 'vsm-br' #VSM installation will be done only once. Will not respond to puppet sync - $_phy_ip_addr = inline_template("<%= scope.lookupvar('::ipaddress_${n1k_vsm::phy_if_bridge}') %>") - if $_phy_ip_addr != '' { - $phy_ip_addr = inline_template("<%= scope.lookupvar('::ipaddress_${n1k_vsm::phy_if_bridge}') %>") - $phy_ip_mask = inline_template("<%= scope.lookupvar('::netmask_${n1k_vsm::phy_if_bridge}') %>") - $gw_intf = $n1k_vsm::phy_gateway - include n1k_vsm::pkgprep_ovscfg + $_check_phy_if_bridge = regsubst($n1k_vsm::phy_if_bridge, '[.:-]+', '_', 'G') + $_phy_mac_addr = inline_template("<%= scope.lookupvar('::macaddress_${_check_phy_if_bridge}') %>") + if $_phy_mac_addr != '' { + include ::n1k_vsm::pkgprep_ovscfg } notify {"Arg: intf ${phy_if_bridge} vsm_role ${vsm_role} domainid ${vsm_domain_id}" : withpath => true} - notify {"ip ${phy_ip_addr} mask ${phy_ip_mask} gw ${n1k_vsm::phy_gateway}" : withpath => true} - notify {"gw_dv ${gw_intf} ovs ${ovsbridge} vsmname ${n1k_vsm::vsmname}" : withpath => true} + notify {"ovs ${ovsbridge} vsmname ${n1k_vsm::vsmname}" : withpath => true} notify {"mgmtip ${n1k_vsm::mgmtip} vsm_mask ${n1k_vsm::mgmtnetmask} vsm_gw ${n1k_vsm::mgmtgateway}": withpath => false} - include n1k_vsm::vsmprep - include n1k_vsm::deploy + include ::n1k_vsm::vsmprep + include ::n1k_vsm::deploy Class['n1k_vsm::vsmprep'] -> Class['n1k_vsm::deploy'] } diff --git a/n1k_vsm/manifests/pkgprep_ovscfg.pp b/n1k_vsm/manifests/pkgprep_ovscfg.pp index 577cd9fb4..512646da3 100644 --- a/n1k_vsm/manifests/pkgprep_ovscfg.pp +++ b/n1k_vsm/manifests/pkgprep_ovscfg.pp @@ -4,8 +4,8 @@ # class n1k_vsm::pkgprep_ovscfg { - require n1k_vsm - include n1k_vsm + require ::n1k_vsm + include ::n1k_vsm case $::osfamily { 'RedHat': { @@ -19,7 +19,7 @@ # VSM dependent packages installation section package { 'Package_qemu-kvm': ensure => installed, - name => 'qemu-kvm', + name => 'qemu-kvm-rhev', } package {'Package_libvirt': @@ -53,17 +53,8 @@ unless => '/usr/bin/virsh net-info default | /bin/grep -c \'Autostart: .* no\'', } - package { 
'Package_ovs': - ensure => installed, - name => 'openvswitch', - } - - # bring up OVS and perform interface configuration - service { 'Service_ovs': - ensure => running, - name => 'openvswitch', - enable => true, - } + # Ensure OVS is present + require vswitch::ovs package { 'genisoimage': ensure => installed, @@ -71,62 +62,119 @@ } notify { "Debug br ${n1k_vsm::ovsbridge} intf ${n1k_vsm::phy_if_bridge} ." : withpath => true } - notify { "Debug ${n1k_vsm::vsmname} ip ${n1k_vsm::phy_ip_addr} mask ${n1k_vsm::phy_ip_mask} gw_intf ${n1k_vsm::gw_intf}" : withpath => true } - - # Check if we've already configured the ovs - if $n1k_vsm::gw_intf != $n1k_vsm::ovsbridge { - # Modify Ovs bridge inteface configuation file - augeas { 'Augeas_modify_ifcfg-ovsbridge': - name => $n1k_vsm::ovsbridge, - context => "/files/etc/sysconfig/network-scripts/ifcfg-${n1k_vsm::ovsbridge}", - changes => [ - 'set TYPE OVSBridge', - "set DEVICE ${n1k_vsm::ovsbridge}", - 'set DEVICETYPE ovs', - "set OVSREQUIRES ${n1k_vsm::ovsbridge}", - 'set NM_CONTROLLED no', - 'set BOOTPROTO none', - 'set ONBOOT yes', - 'set DEFROUTE yes', - 'set MTU 1500', - "set NAME ${n1k_vsm::ovsbridge}", - "set IPADDR ${n1k_vsm::phy_ip_addr}", - "set NETMASK ${n1k_vsm::phy_ip_mask}", - "set GATEWAY ${n1k_vsm::phy_gateway}", - 'set USERCTL no', - ], + + $_ovsbridge = regsubst($n1k_vsm::ovsbridge, '[.:-]+', '_', 'G') + $_ovsbridge_mac = inline_template("<%= scope.lookupvar('::macaddress_${_ovsbridge}') %>") + + # Check if we've already configured the vsm bridge, skip configuration if so + if ($_ovsbridge_mac == '') { + + #Gather info from the port/bridge including IP if needed + $_phy_if_bridge = regsubst($n1k_vsm::phy_if_bridge, '[.:-]+', '_', 'G') + $_phy_ip_addr = inline_template("<%= scope.lookupvar('::ipaddress_${_phy_if_bridge}') %>") + if $_phy_ip_addr != '' and !($n1k_vsm::existing_bridge) { + $phy_ip_addr = inline_template("<%= scope.lookupvar('::ipaddress_${_phy_if_bridge}') %>") + $phy_ip_mask = inline_template("<%= scope.lookupvar('::netmask_${_phy_if_bridge}') %>") + $gw_intf = $n1k_vsm::phy_gateway + + notify {"ip ${phy_ip_addr} mask ${phy_ip_mask} gw ${n1k_vsm::phy_gateway} gw_dv ${gw_intf}" : withpath => true} + + # Modify Ovs bridge inteface configuation file (including IP) + augeas { 'Augeas_modify_ifcfg-ovsbridge': + name => $n1k_vsm::ovsbridge, + context => "/files/etc/sysconfig/network-scripts/ifcfg-${n1k_vsm::ovsbridge}", + changes => [ + 'set TYPE OVSBridge', + "set DEVICE ${n1k_vsm::ovsbridge}", + 'set DEVICETYPE ovs', + "set OVSREQUIRES ${n1k_vsm::ovsbridge}", + 'set NM_CONTROLLED no', + 'set BOOTPROTO none', + 'set ONBOOT yes', + 'set DEFROUTE yes', + 'set MTU 1500', + "set NAME ${n1k_vsm::ovsbridge}", + "set IPADDR ${phy_ip_addr}", + "set NETMASK ${phy_ip_mask}", + "set GATEWAY ${n1k_vsm::phy_gateway}", + 'set USERCTL no', + ], + } + } elsif ($n1k_vsm::existing_bridge) { + # Modify Ovs bridge inteface configuation file (without IP) + augeas { 'Augeas_modify_ifcfg-ovsbridge': + name => $n1k_vsm::ovsbridge, + context => "/files/etc/sysconfig/network-scripts/ifcfg-${n1k_vsm::ovsbridge}", + changes => [ + 'set TYPE OVSBridge', + "set DEVICE ${n1k_vsm::ovsbridge}", + 'set DEVICETYPE ovs', + "set OVSREQUIRES ${n1k_vsm::ovsbridge}", + 'set NM_CONTROLLED no', + 'set BOOTPROTO none', + 'set ONBOOT yes', + 'set DEFROUTE yes', + 'set MTU 1500', + "set NAME ${n1k_vsm::ovsbridge}", + 'set USERCTL no', + ], + } + } else { + # Error out here due to invalid interface specification + fail('Interface to be bridged for VSM must have IP 
address') } - # Modify Physical Interface config file - augeas { 'Augeas_modify_ifcfg-phy_if_bridge': - name => $n1k_vsm::phy_if_bridge, - context => "/files/etc/sysconfig/network-scripts/ifcfg-${n1k_vsm::phy_if_bridge}", - changes => [ - 'set TYPE OVSPort', - "set DEVICE ${n1k_vsm::phy_if_bridge}", - 'set DEVICETYPE ovs', - "set OVS_BRIDGE ${n1k_vsm::ovsbridge}", - 'set NM_CONTROLLED no', - 'set BOOTPROTO none', - 'set ONBOOT yes', - "set NAME ${n1k_vsm::phy_if_bridge}", - 'rm IPADDR', - 'rm NETMASK', - 'rm GATEWAY', - 'set USERCTL no', - ], - notify => Service['Service_network'], + exec { 'Flap_n1kv_bridge': + command => "/sbin/ifdown ${n1k_vsm::ovsbridge} && /sbin/ifup ${n1k_vsm::ovsbridge}", + require => Augeas['Augeas_modify_ifcfg-ovsbridge'], } - # Make sure that networking comes fine after reboot - file { 'Create_Init_File': - replace => 'yes', - path => '/etc/init.d/n1kv', - owner => 'root', - group => 'root', - mode => '0775', - source => 'puppet:///modules/n1k_vsm/n1kv', + + if !($n1k_vsm::existing_bridge) { + # If there isn't an existing bridge, the interface is a port, and we + # need to add it to vsm-br + + # Modify Physical Interface config file + augeas { 'Augeas_modify_ifcfg-phy_if_bridge': + name => $n1k_vsm::phy_if_bridge, + context => "/files/etc/sysconfig/network-scripts/ifcfg-${n1k_vsm::phy_if_bridge}", + changes => [ + 'set TYPE OVSPort', + "set DEVICE ${n1k_vsm::phy_if_bridge}", + 'set DEVICETYPE ovs', + "set OVS_BRIDGE ${n1k_vsm::ovsbridge}", + 'set NM_CONTROLLED no', + 'set BOOTPROTO none', + 'set ONBOOT yes', + "set NAME ${n1k_vsm::phy_if_bridge}", + 'set DEFROUTE no', + 'set IPADDR ""', + 'rm NETMASK', + 'rm GATEWAY', + 'set USERCTL no', + ], + } + exec { 'Flap_n1kv_phy_if': + command => "/sbin/ifdown ${n1k_vsm::phy_if_bridge} && /sbin/ifup ${n1k_vsm::phy_if_bridge}", + require => Augeas['Augeas_modify_ifcfg-phy_if_bridge'], + } + } else { + # If there is an existing bridge- create patch ports to connect vsm-br to it + exec { 'Create_patch_port_on_existing_bridge': + command => "/bin/ovs-vsctl --may-exist add-port ${n1k_vsm::phy_if_bridge} ${n1k_vsm::phy_if_bridge}-${n1k_vsm::ovsbridge} -- set Interface ${n1k_vsm::phy_if_bridge}-${n1k_vsm::ovsbridge} type=patch options:peer=${n1k_vsm::ovsbridge}-${n1k_vsm::phy_if_bridge}", + require => Exec['Flap_n1kv_bridge'], + } + exec { 'Create_patch_port_on_vsm_bridge': + command => "/bin/ovs-vsctl --may-exist add-port ${n1k_vsm::ovsbridge} ${n1k_vsm::ovsbridge}-${n1k_vsm::phy_if_bridge} -- set Interface ${n1k_vsm::ovsbridge}-${n1k_vsm::phy_if_bridge} type=patch options:peer=${n1k_vsm::phy_if_bridge}-${n1k_vsm::ovsbridge}", + require => Exec['Flap_n1kv_bridge'], + } + if ($n1k_vsm::phy_bridge_vlan > 0) and ($n1k_vsm::phy_bridge_vlan < 4096) { + exec { 'Tag_patch_port': + command => "/bin/ovs-vsctl set port ${n1k_vsm::phy_if_bridge}-${n1k_vsm::ovsbridge} tag=${n1k_vsm::phy_bridge_vlan}", + require => Exec['Create_patch_port_on_existing_bridge'], + } + } } - } # endif of if "${n1k_vsm::gw_intf}" != "${n1k_vsm::ovsbridge}" + } # endif of if ($_ovsbridge_mac == '') } 'Ubuntu': { } diff --git a/n1k_vsm/manifests/vsmprep.pp b/n1k_vsm/manifests/vsmprep.pp index 32447022f..f26f9e004 100644 --- a/n1k_vsm/manifests/vsmprep.pp +++ b/n1k_vsm/manifests/vsmprep.pp @@ -5,9 +5,9 @@ # class n1k_vsm::vsmprep { - include 'stdlib' - require n1k_vsm - include n1k_vsm + include ::stdlib + require ::n1k_vsm + include ::n1k_vsm # prepare vsm folders ensure_resource('file', '/var/spool/cisco/', { @@ -35,12 +35,9 @@ } } else { $vsmimage_uri = 
'unspec' + $vsm_path = '/opt/cisco/vsm' } -# exec { 'Prev_VSM': -# command => "/bin/rm -f /var/spool/cisco/vsm/* || /bin/true", -# } - if $vsmimage_uri == 'file' { #specify location on target-host where image file will be downloaded to. file { $vsmtgtimg: @@ -63,8 +60,9 @@ } } package {'nexus-1000v-iso': - ensure => $n1k_vsm::n1kv_version, - name => 'nexus-1000v-iso' + ensure => 'present', + name => 'nexus-1000v-iso', + provider => 'yum', } } @@ -78,10 +76,30 @@ source => 'puppet:///modules/n1k_vsm/repackiso.py', } + # copy the latest VSM image to known name + if $n1k_vsm::n1kv_version == '' or $n1k_vsm::n1kv_version == 'latest'{ + exec { 'Exec_VSM_Rename': + command => "/bin/cp ${vsm_path}/`/bin/ls ${vsm_path} | /bin/sort -r | /bin/grep -m 1 iso` ${vsm_path}/current-n1000v.iso", + creates => "${vsm_path}/current-n1000v.iso", + } + } else { + exec { 'Exec_VSM_Rename_with_version': + command => "/bin/cp ${vsm_path}/n1000v-dk9.${n1k_vsm::n1kv_version}.iso ${vsm_path}/current-n1000v.iso", + creates => "${vsm_path}/current-n1000v.iso", + } + } + # Now generate ovf xml file and repackage the iso exec { 'Exec_VSM_Repackage_Script': - command => "/tmp/repackiso.py -i/var/spool/cisco/vsm/${n1k_vsm::n1kv_version}.iso -d${n1k_vsm::vsm_domain_id} -n${n1k_vsm::vsmname} -m${n1k_vsm::mgmtip} -s${n1k_vsm::mgmtnetmask} -g${n1k_vsm::mgmtgateway} -p${n1k_vsm::vsm_admin_passwd} -r${n1k_vsm::vsm_role} -f/var/spool/cisco/vsm/${n1k_vsm::vsm_role}_repacked.iso ", - unless => "/usr/bin/virsh list --all | grep -c ${n1k_vsm::vsmname}", + command => "/tmp/repackiso.py -i${vsm_path}/current-n1000v.iso -d${n1k_vsm::vsm_domain_id} -n${n1k_vsm::vsmname} -m${n1k_vsm::mgmtip} -s${n1k_vsm::mgmtnetmask} -g${n1k_vsm::mgmtgateway} -p${n1k_vsm::vsm_admin_passwd} -r${n1k_vsm::vsm_role} -f/var/spool/cisco/vsm/${n1k_vsm::vsm_role}_repacked.iso", + creates => "/var/spool/cisco/vsm/${n1k_vsm::vsm_role}_repacked.iso", } + # If we're under pacemaker_control, create a secondary VSM iso as well + if ($n1k_vsm::pacemaker_control) { + exec { 'Exec_VSM_Repackage_Script_secondary': + command => "/tmp/repackiso.py -i${vsm_path}/current-n1000v.iso -d${n1k_vsm::vsm_domain_id} -n${n1k_vsm::vsmname_s} -m${n1k_vsm::mgmtip} -s${n1k_vsm::mgmtnetmask} -g${n1k_vsm::mgmtgateway} -p${n1k_vsm::vsm_admin_passwd} -r${n1k_vsm::vsm_role_s} -f/var/spool/cisco/vsm/${n1k_vsm::vsm_role_s}_repacked.iso", + creates => "/var/spool/cisco/vsm/${n1k_vsm::vsm_role_s}_repacked.iso", + } + } } diff --git a/n1k_vsm/spec/acceptance/nodesets/centos-70-x64.yml b/n1k_vsm/spec/acceptance/nodesets/centos-70-x64.yml new file mode 100644 index 000000000..5f097e9fe --- /dev/null +++ b/n1k_vsm/spec/acceptance/nodesets/centos-70-x64.yml @@ -0,0 +1,11 @@ +HOSTS: + centos-server-70-x64: + roles: + - master + platform: el-7-x86_64 + box: puppetlabs/centos-7.0-64-nocm + box_url: https://vagrantcloud.com/puppetlabs/centos-7.0-64-nocm + hypervisor: vagrant +CONFIG: + log_level: debug + type: foss diff --git a/n1k_vsm/spec/acceptance/nodesets/default.yml b/n1k_vsm/spec/acceptance/nodesets/default.yml new file mode 100644 index 000000000..3bb3e6264 --- /dev/null +++ b/n1k_vsm/spec/acceptance/nodesets/default.yml @@ -0,0 +1,11 @@ +HOSTS: + ubuntu-server-1404-x64: + roles: + - master + platform: ubuntu-14.04-amd64 + box: puppetlabs/ubuntu-14.04-64-nocm + box_url: https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm + hypervisor: vagrant +CONFIG: + log_level: debug + type: git diff --git a/n1k_vsm/spec/acceptance/nodesets/nodepool-centos7.yml 
b/n1k_vsm/spec/acceptance/nodesets/nodepool-centos7.yml new file mode 100644 index 000000000..c55287420 --- /dev/null +++ b/n1k_vsm/spec/acceptance/nodesets/nodepool-centos7.yml @@ -0,0 +1,10 @@ +HOSTS: + centos-70-x64: + roles: + - master + platform: el-7-x86_64 + hypervisor: none + ip: 127.0.0.1 +CONFIG: + type: foss + set_env: false diff --git a/n1k_vsm/spec/acceptance/nodesets/nodepool-trusty.yml b/n1k_vsm/spec/acceptance/nodesets/nodepool-trusty.yml new file mode 100644 index 000000000..7f503cadd --- /dev/null +++ b/n1k_vsm/spec/acceptance/nodesets/nodepool-trusty.yml @@ -0,0 +1,10 @@ +HOSTS: + ubuntu-1404-x64: + roles: + - master + platform: ubuntu-14.04-amd64 + hypervisor: none + ip: 127.0.0.1 +CONFIG: + type: foss + set_env: false diff --git a/n1k_vsm/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml b/n1k_vsm/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml new file mode 100644 index 000000000..3bb3e6264 --- /dev/null +++ b/n1k_vsm/spec/acceptance/nodesets/ubuntu-server-1404-x64.yml @@ -0,0 +1,11 @@ +HOSTS: + ubuntu-server-1404-x64: + roles: + - master + platform: ubuntu-14.04-amd64 + box: puppetlabs/ubuntu-14.04-64-nocm + box_url: https://vagrantcloud.com/puppetlabs/ubuntu-14.04-64-nocm + hypervisor: vagrant +CONFIG: + log_level: debug + type: git diff --git a/n1k_vsm/spec/classes/n1kv_vsm_pkgprep_ovscfg_spec.rb b/n1k_vsm/spec/classes/n1kv_vsm_pkgprep_ovscfg_spec.rb new file mode 100644 index 000000000..3f01a0848 --- /dev/null +++ b/n1k_vsm/spec/classes/n1kv_vsm_pkgprep_ovscfg_spec.rb @@ -0,0 +1,292 @@ +# +# Copyright (C) 2015 Cisco Systems Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +require 'spec_helper' + +describe 'n1k_vsm::pkgprep_ovscfg' do + + let :params do + { } + end + + shared_examples_for 'n1k vsm pkgprep_ovscfg' do + + context 'for default values' do + let :pre_condition do + "class { 'n1k_vsm': + phy_if_bridge => 'eth0', + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + existing_bridge => false, + }" + end + let :facts do + { + :ipaddress_eth0 => '1.1.1.1', + :netmask_eth0 => '255.255.255.0', + :osfamily => 'RedHat' + } + end + + it 'should require vswitch::ovs' do + is_expected.to contain_class('vswitch::ovs') + end + + it 'create ovs bridge' do + is_expected.to contain_augeas('Augeas_modify_ifcfg-ovsbridge').with( + 'name' => 'vsm-br', + 'context' => '/files/etc/sysconfig/network-scripts/ifcfg-vsm-br', + 'changes' => ['set TYPE OVSBridge', + 'set DEVICE vsm-br', + 'set DEVICETYPE ovs', + 'set OVSREQUIRES vsm-br', + 'set NM_CONTROLLED no', + 'set BOOTPROTO none', + 'set ONBOOT yes', + 'set DEFROUTE yes', + 'set MTU 1500', + 'set NAME vsm-br', + 'set IPADDR 1.1.1.1', + 'set NETMASK 255.255.255.0', + 'set GATEWAY 1.1.1.3', + 'set USERCTL no'] + ) + end + + it 'flap bridge' do + is_expected.to contain_exec('Flap_n1kv_bridge').with( + 'command' => '/sbin/ifdown vsm-br && /sbin/ifup vsm-br', + ).that_requires('Augeas[Augeas_modify_ifcfg-ovsbridge]') + end + + it 'attach phy if port to bridge' do + is_expected.to contain_augeas('Augeas_modify_ifcfg-phy_if_bridge').with( + 'name' => 'eth0', + 'context' => '/files/etc/sysconfig/network-scripts/ifcfg-eth0', + ) + end + + it 'flap port' do + is_expected.to contain_exec('Flap_n1kv_phy_if').with( + 'command' => '/sbin/ifdown eth0 && /sbin/ifup eth0', + ).that_requires('Augeas[Augeas_modify_ifcfg-phy_if_bridge]') + end + end + + context 'for existing bridge' do + let :pre_condition do + "class { 'n1k_vsm': + phy_if_bridge => 'br_ex', + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + existing_bridge => true, + }" + end + let :facts do + { + :ipaddress_br_ex => '1.1.1.6', + :osfamily => 'RedHat' + } + end + + it 'should require vswitch::ovs' do + is_expected.to contain_class('vswitch::ovs') + end + + it 'create ovs bridge' do + is_expected.to contain_augeas('Augeas_modify_ifcfg-ovsbridge').with( + 'name' => 'vsm-br', + 'context' => '/files/etc/sysconfig/network-scripts/ifcfg-vsm-br', + 'changes' => ['set TYPE OVSBridge', + 'set DEVICE vsm-br', + 'set DEVICETYPE ovs', + 'set OVSREQUIRES vsm-br', + 'set NM_CONTROLLED no', + 'set BOOTPROTO none', + 'set ONBOOT yes', + 'set DEFROUTE yes', + 'set MTU 1500', + 'set NAME vsm-br', + 'set USERCTL no'] + ) + end + + it 'flap bridge' do + is_expected.to contain_exec('Flap_n1kv_bridge').with( + 'command' => '/sbin/ifdown vsm-br && /sbin/ifup vsm-br', + ).that_requires('Augeas[Augeas_modify_ifcfg-ovsbridge]') + end + + it 'create patch port on existing bridge' do + is_expected.to contain_exec('Create_patch_port_on_existing_bridge').with( + 'command' => '/bin/ovs-vsctl --may-exist add-port br_ex br_ex-vsm-br -- set Interface br_ex-vsm-br type=patch options:peer=vsm-br-br_ex' + ).that_requires('Exec[Flap_n1kv_bridge]') + end + + it 'create patch port on vsm bridge' do + is_expected.to contain_exec('Create_patch_port_on_vsm_bridge').with( + 'command' => '/bin/ovs-vsctl --may-exist add-port vsm-br vsm-br-br_ex 
-- set Interface vsm-br-br_ex type=patch options:peer=br_ex-vsm-br' + ).that_requires('Exec[Flap_n1kv_bridge]') + end + end + + context 'for existing bridge no ip' do + let :pre_condition do + "class { 'n1k_vsm': + phy_if_bridge => 'br_ex', + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + existing_bridge => true, + }" + end + let :facts do + { + :osfamily => 'RedHat' + } + end + + it 'should require vswitch::ovs' do + is_expected.to contain_class('vswitch::ovs') + end + + it 'create ovs bridge' do + is_expected.to contain_augeas('Augeas_modify_ifcfg-ovsbridge').with( + 'name' => 'vsm-br', + 'context' => '/files/etc/sysconfig/network-scripts/ifcfg-vsm-br', + 'changes' => ['set TYPE OVSBridge', + 'set DEVICE vsm-br', + 'set DEVICETYPE ovs', + 'set OVSREQUIRES vsm-br', + 'set NM_CONTROLLED no', + 'set BOOTPROTO none', + 'set ONBOOT yes', + 'set DEFROUTE yes', + 'set MTU 1500', + 'set NAME vsm-br', + 'set USERCTL no'] + ) + end + + it 'flap bridge' do + is_expected.to contain_exec('Flap_n1kv_bridge').with( + 'command' => '/sbin/ifdown vsm-br && /sbin/ifup vsm-br', + ).that_requires('Augeas[Augeas_modify_ifcfg-ovsbridge]') + end + + it 'create patch port on existing bridge' do + is_expected.to contain_exec('Create_patch_port_on_existing_bridge').with( + 'command' => '/bin/ovs-vsctl --may-exist add-port br_ex br_ex-vsm-br -- set Interface br_ex-vsm-br type=patch options:peer=vsm-br-br_ex' + ).that_requires('Exec[Flap_n1kv_bridge]') + end + + it 'create patch port on vsm bridge' do + is_expected.to contain_exec('Create_patch_port_on_vsm_bridge').with( + 'command' => '/bin/ovs-vsctl --may-exist add-port vsm-br vsm-br-br_ex -- set Interface vsm-br-br_ex type=patch options:peer=br_ex-vsm-br' + ).that_requires('Exec[Flap_n1kv_bridge]') + end + end + + context 'for existing bridge tagged' do + let :pre_condition do + "class { 'n1k_vsm': + phy_if_bridge => 'br_ex', + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + existing_bridge => true, + phy_bridge_vlan => 100, + }" + end + let :facts do + { + :ipaddress_br_ex => '1.1.1.6', + :osfamily => 'RedHat' + } + end + + it 'should require vswitch::ovs' do + is_expected.to contain_class('vswitch::ovs') + end + + it 'create ovs bridge' do + is_expected.to contain_augeas('Augeas_modify_ifcfg-ovsbridge').with( + 'name' => 'vsm-br', + 'context' => '/files/etc/sysconfig/network-scripts/ifcfg-vsm-br', + 'changes' => ['set TYPE OVSBridge', + 'set DEVICE vsm-br', + 'set DEVICETYPE ovs', + 'set OVSREQUIRES vsm-br', + 'set NM_CONTROLLED no', + 'set BOOTPROTO none', + 'set ONBOOT yes', + 'set DEFROUTE yes', + 'set MTU 1500', + 'set NAME vsm-br', + 'set USERCTL no'] + ) + end + + it 'flap bridge' do + is_expected.to contain_exec('Flap_n1kv_bridge').with( + 'command' => '/sbin/ifdown vsm-br && /sbin/ifup vsm-br', + ).that_requires('Augeas[Augeas_modify_ifcfg-ovsbridge]') + end + + it 'create patch port on existing bridge' do + is_expected.to contain_exec('Create_patch_port_on_existing_bridge').with( + 'command' => '/bin/ovs-vsctl --may-exist add-port br_ex br_ex-vsm-br -- set Interface br_ex-vsm-br type=patch options:peer=vsm-br-br_ex' + ).that_requires('Exec[Flap_n1kv_bridge]') + end + + it 'create patch port on vsm bridge' do + is_expected.to contain_exec('Create_patch_port_on_vsm_bridge').with( + 
'command' => '/bin/ovs-vsctl --may-exist add-port vsm-br vsm-br-br_ex -- set Interface vsm-br-br_ex type=patch options:peer=br_ex-vsm-br' + ).that_requires('Exec[Flap_n1kv_bridge]') + end + + it 'tag patch port' do + is_expected.to contain_exec('Tag_patch_port').with( + 'command' => '/bin/ovs-vsctl set port br_ex-vsm-br tag=100' + ).that_requires('Exec[Create_patch_port_on_existing_bridge]') + end + end + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 'RedHat' } + end + + it_configures 'n1k vsm pkgprep_ovscfg' + end + +end + diff --git a/n1k_vsm/spec/classes/n1kv_vsm_spec.rb b/n1k_vsm/spec/classes/n1kv_vsm_spec.rb new file mode 100644 index 000000000..564802b6d --- /dev/null +++ b/n1k_vsm/spec/classes/n1kv_vsm_spec.rb @@ -0,0 +1,21 @@ +# +# Copyright (C) 2015 eNovance SAS +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +require 'spec_helper' + +describe 'n1k_vsm' do + +end diff --git a/n1k_vsm/spec/classes/n1kv_vsm_vsmprep_spec.rb b/n1k_vsm/spec/classes/n1kv_vsm_vsmprep_spec.rb new file mode 100644 index 000000000..84500f718 --- /dev/null +++ b/n1k_vsm/spec/classes/n1kv_vsm_vsmprep_spec.rb @@ -0,0 +1,243 @@ +# +# Copyright (C) 2015 Cisco Systems Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +require 'spec_helper' + +describe 'n1k_vsm::vsmprep' do + + let :params do + { } + end + + shared_examples_for 'n1k vsm prep' do + + context 'get vsm from file' do + let :pre_condition do + "class { 'n1k_vsm': + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + n1kv_version => '5.2.1.SK3.2.2a-1', + n1kv_source => 'vsm_test', + vsm_role => 'primary', + }" + end + + it 'gets vsm from file' do + is_expected.to contain_file('/var/spool/cisco/vsm/vsm_test').with( + 'source' => 'puppet:///modules/n1k_vsm/vsm_test', + ) + end + + it 'runs repackage iso script' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script').with( + :command => '/tmp/repackiso.py -i/var/spool/cisco/vsm/current-n1000v.iso -d1 -nvsm-p -m1.1.1.1 -s255.255.255.0 -g1.1.1.2 -psecrete -rprimary -f/var/spool/cisco/vsm/primary_repacked.iso', + :creates => '/var/spool/cisco/vsm/primary_repacked.iso' + ) + end + end + + context 'get vsm from specified repo' do + let :pre_condition do + "class { 'n1k_vsm': + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + n1kv_version => '5.2.1.SK3.2.2a-1', + n1kv_source => 'http://vsm_test', + vsm_role => 'primary', + }" + end + + it 'configures specified repo' do + is_expected.to contain_yumrepo('cisco-vsm-repo').with( + 'baseurl' => 'http://vsm_test', + 'gpgkey' => 'http://vsm_test/RPM-GPG-KEY' + ) + end + + # Currently we always just check if VSM is present + it 'installs latest n1kv sofware' do + is_expected.to contain_package('nexus-1000v-iso').with( + :ensure => 'present', + ) + end + it 'runs repackage iso script' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script').with( + :command => '/tmp/repackiso.py -i/opt/cisco/vsm/current-n1000v.iso -d1 -nvsm-p -m1.1.1.1 -s255.255.255.0 -g1.1.1.2 -psecrete -rprimary -f/var/spool/cisco/vsm/primary_repacked.iso', + :creates => '/var/spool/cisco/vsm/primary_repacked.iso' + ) + end + end + + context 'get vsm from pre-configured repo' do + let :pre_condition do + "class { 'n1k_vsm': + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + n1kv_version => '5.2.1.SK3.2.2a-1', + vsm_role => 'primary', + }" + end + + # Currently we always just check if VSM is present + it 'installs latest n1kv sofware' do + is_expected.to contain_package('nexus-1000v-iso').with( + :ensure => 'present', + ) + end + + it 'runs repackage iso script' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script').with( + :command => '/tmp/repackiso.py -i/opt/cisco/vsm/current-n1000v.iso -d1 -nvsm-p -m1.1.1.1 -s255.255.255.0 -g1.1.1.2 -psecrete -rprimary -f/var/spool/cisco/vsm/primary_repacked.iso', + :creates => '/var/spool/cisco/vsm/primary_repacked.iso' + ) + end + end + + context 'get vsm from pre-configured repo secondary' do + let :pre_condition do + "class { 'n1k_vsm': + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + n1kv_version => '5.2.1.SK3.2.2a-1', + vsm_role => 'secondary', + }" + end + + # Currently we always just check if VSM is present + it 'installs latest n1kv sofware' do + is_expected.to 
contain_package('nexus-1000v-iso').with( + :ensure => 'present', + ) + end + + it 'runs repackage iso script' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script').with( + :command => '/tmp/repackiso.py -i/opt/cisco/vsm/current-n1000v.iso -d1 -nvsm-s -m0.0.0.0 -s0.0.0.0 -g0.0.0.0 -psecrete -rsecondary -f/var/spool/cisco/vsm/secondary_repacked.iso', + :creates => '/var/spool/cisco/vsm/secondary_repacked.iso' + ) + end + end + + context 'get vsm from pre-configured repo pacemaker controlled' do + let :pre_condition do + "class { 'n1k_vsm': + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + n1kv_version => '5.2.1.SK3.2.2a-1', + pacemaker_control => true, + }" + end + + # Currently we always just check if VSM is present + it 'installs latest n1kv sofware' do + is_expected.to contain_package('nexus-1000v-iso').with( + :ensure => 'present', + ) + end + + it 'runs rename with version' do + is_expected.to contain_exec('Exec_VSM_Rename_with_version').with( + :command => '/bin/cp /opt/cisco/vsm/n1000v-dk9.5.2.1.SK3.2.2a-1.iso /opt/cisco/vsm/current-n1000v.iso', + :creates => '/opt/cisco/vsm/current-n1000v.iso' + ) + end + + it 'runs repackage iso script' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script').with( + :command => '/tmp/repackiso.py -i/opt/cisco/vsm/current-n1000v.iso -d1 -nvsm-p -m1.1.1.1 -s255.255.255.0 -g1.1.1.2 -psecrete -rprimary -f/var/spool/cisco/vsm/primary_repacked.iso', + :creates => '/var/spool/cisco/vsm/primary_repacked.iso' + ) + end + + it 'runs repackage iso script secondary' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script_secondary').with( + :command => '/tmp/repackiso.py -i/opt/cisco/vsm/current-n1000v.iso -d1 -nvsm-s -m1.1.1.1 -s255.255.255.0 -g1.1.1.2 -psecrete -rsecondary -f/var/spool/cisco/vsm/secondary_repacked.iso', + :creates => '/var/spool/cisco/vsm/secondary_repacked.iso' + ) + end + end + context 'get vsm from pre-configured repo pacemaker controlled latest version' do + let :pre_condition do + "class { 'n1k_vsm': + phy_gateway => '1.1.1.3', + vsm_domain_id => '1', + vsm_admin_passwd => 'secrete', + vsm_mgmt_ip => '1.1.1.1', + vsm_mgmt_netmask => '255.255.255.0', + vsm_mgmt_gateway => '1.1.1.2', + n1kv_version => 'latest', + pacemaker_control => true, + }" + end + + # Currently we always just check if VSM is present + it 'installs latest n1kv sofware' do + is_expected.to contain_package('nexus-1000v-iso').with( + :ensure => 'present', + ) + end + + it 'runs rename without version' do + is_expected.to contain_exec('Exec_VSM_Rename').with( + :creates => '/opt/cisco/vsm/current-n1000v.iso' + ) + end + + it 'runs repackage iso script' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script').with( + :command => '/tmp/repackiso.py -i/opt/cisco/vsm/current-n1000v.iso -d1 -nvsm-p -m1.1.1.1 -s255.255.255.0 -g1.1.1.2 -psecrete -rprimary -f/var/spool/cisco/vsm/primary_repacked.iso', + :creates => '/var/spool/cisco/vsm/primary_repacked.iso' + ) + end + + it 'runs repackage iso script secondary' do + is_expected.to contain_exec('Exec_VSM_Repackage_Script_secondary').with( + :command => '/tmp/repackiso.py -i/opt/cisco/vsm/current-n1000v.iso -d1 -nvsm-s -m1.1.1.1 -s255.255.255.0 -g1.1.1.2 -psecrete -rsecondary -f/var/spool/cisco/vsm/secondary_repacked.iso', + :creates => '/var/spool/cisco/vsm/secondary_repacked.iso' + ) + end + end + end + + context 'on RedHat platforms' do + let :facts do + { :osfamily => 
'RedHat' } + end + + it_configures 'n1k vsm prep' + end + +end diff --git a/n1k_vsm/spec/shared_examples.rb b/n1k_vsm/spec/shared_examples.rb new file mode 100644 index 000000000..fec0eacc9 --- /dev/null +++ b/n1k_vsm/spec/shared_examples.rb @@ -0,0 +1,5 @@ +shared_examples_for "a Puppet::Error" do |description| + it "with message matching #{description.inspect}" do + expect { is_expected.to have_class_count(1) }.to raise_error(Puppet::Error, description) + end +end diff --git a/n1k_vsm/spec/spec_helper.rb b/n1k_vsm/spec/spec_helper.rb new file mode 100644 index 000000000..700be6a27 --- /dev/null +++ b/n1k_vsm/spec/spec_helper.rb @@ -0,0 +1,19 @@ +require 'puppetlabs_spec_helper/module_spec_helper' +require 'shared_examples' +require 'webmock/rspec' + +RSpec.configure do |c| + c.alias_it_should_behave_like_to :it_configures, 'configures' + c.alias_it_should_behave_like_to :it_raises, 'raises' + + c.default_facts = { + :kernel => 'Linux', + :concat_basedir => '/var/lib/puppet/concat', + :memorysize => '1000 MB', + :processorcount => '1', + :puppetversion => '3.7.3', + :uniqueid => '123' + } +end + +at_exit { RSpec::Puppet::Coverage.report! } diff --git a/n1k_vsm/spec/spec_helper_acceptance.rb b/n1k_vsm/spec/spec_helper_acceptance.rb new file mode 100644 index 000000000..429e807c4 --- /dev/null +++ b/n1k_vsm/spec/spec_helper_acceptance.rb @@ -0,0 +1,56 @@ +require 'beaker-rspec' +require 'beaker/puppet_install_helper' + +run_puppet_install_helper + +RSpec.configure do |c| + # Project root + proj_root = File.expand_path(File.join(File.dirname(__FILE__), '..')) + modname = JSON.parse(open('metadata.json').read)['name'].split('-')[1] + + # Readable test descriptions + c.formatter = :documentation + + # Configure all nodes in nodeset + c.before :suite do + # Install module and dependencies + hosts.each do |host| + + # install git + install_package host, 'git' + + zuul_ref = ENV['ZUUL_REF'] + zuul_branch = ENV['ZUUL_BRANCH'] + zuul_url = ENV['ZUUL_URL'] + + repo = 'openstack/puppet-openstack-integration' + + # Start out with clean moduledir, don't trust r10k to purge it + on host, "rm -rf /etc/puppet/modules/*" + # Install dependent modules via git or zuul + r = on host, "test -e /usr/zuul-env/bin/zuul-cloner", { :acceptable_exit_codes => [0,1] } + if r.exit_code == 0 + zuul_clone_cmd = '/usr/zuul-env/bin/zuul-cloner ' + zuul_clone_cmd += '--cache-dir /opt/git ' + zuul_clone_cmd += "--zuul-ref #{zuul_ref} " + zuul_clone_cmd += "--zuul-branch #{zuul_branch} " + zuul_clone_cmd += "--zuul-url #{zuul_url} " + zuul_clone_cmd += "git://git.openstack.org #{repo}" + on host, zuul_clone_cmd + else + on host, "git clone https://git.openstack.org/#{repo} #{repo}" + end + + on host, "ZUUL_REF=#{zuul_ref} ZUUL_BRANCH=#{zuul_branch} ZUUL_URL=#{zuul_url} bash #{repo}/install_modules.sh" + + # Install the module being tested + on host, "rm -fr /etc/puppet/modules/#{modname}" + puppet_module_install(:source => proj_root, :module_name => modname) + + on host, "rm -fr #{repo}" + + # List modules installed to help with debugging + on host, puppet('module','list'), { :acceptable_exit_codes => 0 } + end + end +end diff --git a/n1k_vsm/templates/vsm_vm.xml.erb b/n1k_vsm/templates/vsm_vm.xml.erb index 82a2a013e..7096724b6 100644 --- a/n1k_vsm/templates/vsm_vm.xml.erb +++ b/n1k_vsm/templates/vsm_vm.xml.erb @@ -41,6 +41,9 @@ + <% if scope.lookupvar('n1k_vsm::pacemaker_control') == true %> + + <% end %>
@@ -49,6 +52,9 @@ + <% if scope.lookupvar('n1k_vsm::pacemaker_control') == true %> + + <% end %>
@@ -57,6 +63,9 @@ + <% if scope.lookupvar('n1k_vsm::pacemaker_control') == true %> + + <% end %>
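The template change above ships alongside the new deploy.pp, vsmprep.pp and init.pp logic, all of which is driven by the class parameters documented in init.pp. A minimal declaration sketch follows; every value is a placeholder (addresses, password, domain ID and bridge name are illustrative, not taken from this change), and the parameter combination mirrors what the new rspec pre_conditions exercise:

class { '::n1k_vsm':
  # Required connectivity and credential parameters (placeholder values).
  phy_if_bridge     => 'br-ex',
  phy_gateway       => '10.0.0.1',
  vsm_domain_id     => '1',
  vsm_admin_passwd  => 'secret',
  vsm_mgmt_ip       => '10.0.0.10',
  vsm_mgmt_netmask  => '255.255.255.0',
  vsm_mgmt_gateway  => '10.0.0.1',
  # New in this change: let pacemaker own the VSM lifecycle (defines both the
  # primary and the secondary VM but starts neither), attach to an already
  # existing OVS bridge through patch ports, tag that patch port, and seed the
  # generated VSM MAC addresses from a hex string of at least 7 digits.
  pacemaker_control => true,
  existing_bridge   => true,
  phy_bridge_vlan   => 100,
  vsm_mac_base      => 'a1b2c3d4e5f',
}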
diff --git a/n1k_vsm/templates/vsm_vm_secondary.xml.erb b/n1k_vsm/templates/vsm_vm_secondary.xml.erb new file mode 100644 index 000000000..cbd237bfa --- /dev/null +++ b/n1k_vsm/templates/vsm_vm_secondary.xml.erb @@ -0,0 +1,92 @@ + + <%= scope.lookupvar('n1k_vsm::vsmname_s') %> + <%= scope.lookupvar('n1k_vsm::memory') %> + <%= scope.lookupvar('n1k_vsm::vcpu') %> + + + hvm + + + + + + + + destroy + restart + restart + + + /usr/libexec/qemu-kvm + + + '/> + + + + + + '/> + + + + + + +
+ + + + '/> + + + + <% if scope.lookupvar('n1k_vsm::pacemaker_control') == true %> + + <% end %> +
+ + + + '/> + + + + <% if scope.lookupvar('n1k_vsm::pacemaker_control') == true %> + + <% end %> +
+ + + + '/> + + + + <% if scope.lookupvar('n1k_vsm::pacemaker_control') == true %> + + <% end %> +
+ + + + + + + + +
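The kmod spec files added earlier in this change (kmod_alias, kmod_blacklist, kmod_install and kmod_load) exercise defines that come from the newly pinned camptocamp/puppet-kmod module. A short usage sketch, assuming illustrative module names and file paths (none of these resources appear in this change itself) and limited to the parameters the new tests cover:

# Load a module now and persist it across reboots; the persistence mechanism
# is osfamily-specific, as the kmod_load spec shows, and the file path here
# is illustrative.
kmod::load { 'bonding':
  ensure => present,
  file   => '/etc/modules',
}

# Prevent a module from being auto-loaded; file defaults to
# /etc/modprobe.d/blacklist.conf per the kmod_blacklist spec.
kmod::blacklist { 'pcspkr':
  ensure => present,
}

# Map an alias onto a real module via modprobe.d.
kmod::alias { 'bond0':
  source => 'bonding',
  file   => '/etc/modprobe.d/aliases.conf',
}

# Override the install command for a module.
kmod::install { 'dccp':
  ensure  => present,
  command => '/bin/true',
  file    => '/etc/modprobe.d/dccp.conf',
}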