#!/usr/bin/env python
#
# Author: Pablo Iranzo Gomez ([email protected])
#
# Description: Script for grouping/ungrouping VMs using the rhevm-sdk
# API, based on RHCS cluster_ tags on RHEV-M and the elas_manage tag
#
# Requires rhevm-sdk to work
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Goals:
# - Do not let VMs tagged with the same cluster_ tag run on the same host
# - Do not manage any VM without the tag elas_manage
#
# Tag behaviour (see the hypothetical tagging example below):
# elas_manage: manage this VM by using the elastic management script (EMS)
# elas_start : make this VM autostart if down
# cluster_***: make this VM part of a RHCS 'cluster' to avoid same-host placement
#
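# A minimal sketch of how the tags are expected to be applied, assuming two
# hypothetical RHCS node VMs ("db-node1", "db-node2") and a hypothetical tag
# "cluster_db"; it uses the same SDK calls this script relies on below:
#
#   vm = api.vms.get(name="db-node1")
#   vm.tags.add(params.Tag(name="elas_manage"))
#   vm.tags.add(params.Tag(name="cluster_db"))
#
# With both nodes tagged this way, the script keeps them on different hosts.
#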
import optparse
from rhev_functions import *
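# getuserpass, apilogin, paginate, check_tags and migra used below are expected
# to be provided by rhev_functions (the wildcard import above), which presumably
# also re-exports the ovirtsdk 'params' module used for Tag/Host/Action objects.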
description = """
RHEV-vm-cluster is a script for managing, via the API, the VMs under RHEV-M control on both RHEV-H and RHEL hosts.
Its goal is to enforce VM <-> host rules so that two cluster (RHCS) nodes never
end up on the same physical host.
"""
# Option parsing
p = optparse.OptionParser("rhev-vm-cluster.py [arguments]", description=description)
p.add_option("-u", "--user", dest="username", help="Username to connect to RHEVM API", metavar="admin@internal",
default="admin@internal")
p.add_option("-w", "--password", dest="password", help="Password to use with username", metavar="admin",
default="admin")
p.add_option("-k", action="store_true", dest="keyring", help="use python keyring for user/password", metavar="keyring",
default=False)
p.add_option("-W", action="store_true", dest="askpassword", help="Ask for password", metavar="admin", default=False)
p.add_option("-s", "--server", dest="server", help="RHEV-M server address/hostname to contact", metavar="127.0.0.1",
default="127.0.0.1")
p.add_option("-p", "--port", dest="port", help="API port to contact", metavar="443", default="443")
p.add_option('-v', "--verbosity", dest="verbosity", help="Show messages while running", metavar='[0-n]', default=0,
type='int')
p.add_option('-t', "--tagall", dest="tagall", help="Tag all hosts with elas_manage", metavar='0/1', default=0,
type='int')
p.add_option('-c', "--cluster", dest="cluster", help="Select cluster name to process", metavar='cluster', default=None)
(options, args) = p.parse_args()
options.username, options.password = getuserpass(options)
baseurl = "https://%s:%s" % (options.server, options.port)
api = apilogin(url=baseurl, username=options.username, password=options.password)
# FUNCTIONS
def process_cluster(cluster):
    """Processes one cluster
    @param cluster: Cluster object to process
    """
    # Empty vars for further processing
    hosts_in_cluster = []
    vms_in_cluster = []
    tags_in_cluster = []
    tags_vm = {}
    tags_with_more_than_one = []

    # Get the list of hosts that are up in this cluster
    query = "cluster = %s and status = up" % api.clusters.get(id=cluster.id).name
    for host in paginate(api.hosts, query):
        if host.cluster.id == cluster.id:
            if host.status.state == "up":
                hosts_in_cluster.append(host.id)

    if options.verbosity > 2:
        print("\nProcessing cluster %s..." % cluster.name)
        print("##############################################")

    # Create the empty set of vars that we'll populate later
    for tag in api.tags.list():
        tags_vm[tag.name] = []

    # Populate the list of tags and VMs
    query = "cluster = %s and status = up and tag = elas_manage" % api.clusters.get(id=cluster.id).name
    for vm in paginate(api.vms, query):
        if vm.cluster.id == cluster.id:
            if vm.status.state == "up":
                if not vm.tags.get("elas_manage"):
                    if options.verbosity > 3:
                        print("VM %s is discarded because it has no tag elas_manage" % vm.name)
                else:
                    # Add the VM id to the list of VMs to manage in this cluster
                    vms_in_cluster.append(vm.id)
                    for tag in vm.tags.list():
                        if tag.name[0:8] == "cluster_":
                            if options.verbosity > 3:
                                print("VM %s in cluster %s has tag %s" % (vm.name, cluster.name, tag.name))
                            # Record the tag as used in this cluster and add the VM
                            # to the list of VMs carrying this tag
                            tags_in_cluster.append(tag.id)
                            tags_vm[tag.name].append(vm.name)

    # Construct the list of cluster_ tags with more than one VM in state == up to process
    for tag in api.tags.list():
        if len(tags_vm[tag.name]) > 1:
            if tag.name[0:8] == "cluster_":
                tags_with_more_than_one.append(tag.name)

    if options.verbosity > 3:
        print('\nTAGS/VM organization: %s' % tags_vm)
        print('TAGS with more than one vm: %s' % tags_with_more_than_one)
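
    # At this point tags_vm maps every tag name to the up-and-managed VMs carrying
    # it; with two hypothetical nodes tagged cluster_db it would look roughly like
    # {'cluster_db': ['db-node1', 'db-node2'], 'elas_manage': [], ...}, while
    # tags_with_more_than_one keeps only the cluster_ tags that actually need
    # anti-affinity handling.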
    tags_to_manage = []
    for etiqueta in tags_with_more_than_one:
        if len(tags_vm[etiqueta]) > len(hosts_in_cluster):
            if options.verbosity > 3:
                print("\nMore VM's with tag than available hosts for tag %s, will do as much as I can..." % etiqueta)
        else:
            if options.verbosity > 3:
                print("\nContinuing for tag %s" % etiqueta)
        if etiqueta[0:8] == "cluster_":
            tags_to_manage.append(etiqueta)

    # Removing duplicates
    tags = sorted(set(tags_in_cluster))
    tags_in_cluster = tags

    if options.verbosity > 3:
        print("Hosts in cluster:")
        print(hosts_in_cluster)
        print("Vm's in cluster")
        print(vms_in_cluster)
        print("Tags in cluster")
        print(tags_in_cluster)
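
    # Placement loop: for every cluster_ tag to manage, keep a set of hosts already
    # claimed by one of its VMs; if a VM's current host is still free the VM stays
    # put, otherwise it is made migratable, migrated to an unused host and finally
    # pinned to whatever host it ends up on.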
    for etiqueta in tags_to_manage:
        tags_vm_used = set([])
        if options.verbosity > 3:
            print("Managing tag %s" % etiqueta)

        for vm in tags_vm[etiqueta]:
            if options.verbosity > 4:
                print('Processing vm %s for tag %s at host %s' % (
                    vm, etiqueta, api.hosts.get(id=api.vms.get(name=vm).host.id).name))

            # Set target to the host the VM is actually running on
            target = api.vms.get(name=vm).host.id
            if api.vms.get(name=vm).host.id not in tags_vm_used:
                # Host not yet used, accept it directly
                tags_vm_used.add(target)
            else:
                # Host was in use, searching for a new target
                for host in hosts_in_cluster:
                    if host in tags_vm_used:
                        if options.verbosity > 4:
                            print("Host %s used, skipping" % host)
                    else:
                        if options.verbosity > 4:
                            print("Host %s not used, migrating there" % host)
                        # Setting new host
                        target = host

            nombre = api.hosts.get(id=target).name

            # Only migrate the VM if there is a host change
            maquina = api.vms.get(name=vm)
            if maquina.host.id != target:
                if options.verbosity > 3:
                    print('Processing vm %s for tag %s at host %s needs migration to host %s' % (
                        vm, etiqueta, api.hosts.get(id=api.vms.get(name=vm).host.id).name, nombre))

                # Allow migration
                maquina.placement_policy.host = params.Host()
                maquina.placement_policy.affinity = "migratable"
                maquina.update()

                # Migrate the VM to the target host to satisfy the rules
                migra(api, options, api.vms.get(name=vm), params.Action(host=api.hosts.get(id=target)))
                tags_vm_used.add(target)
            else:
                if options.verbosity > 4:
                    print("Skipping migration, target == current host")

            # Discard further migration of the machine by pinning it to its target host
            maquina.placement_policy.affinity = "pinned"
            maquina.placement_policy.host = api.hosts.get(id=target)
            try:
                maquina.update()
            except Exception:
                if options.verbosity > 4:
                    print("Problem updating VM parameters for pinning")
# MAIN PROGRAM
if __name__ == "__main__":
    # Check if we have defined the needed tags and create them if missing
    check_tags(api, options)

    # TAGALL?
    # Add the elas_manage TAG to every single VM to automate the management
    if options.tagall == 1:
        if options.verbosity >= 1:
            print("Tagging all VM's with elas_manage")
        for vm in paginate(api.vms):
            try:
                vm.tags.add(params.Tag(name="elas_manage"))
            except Exception:
                print("Error adding elas_manage tag to vm %s" % vm.name)

    # CLEANUP
    # Remove pinning from VMs in down state to allow them to start on any host
    query = "status = down"
    for vm in paginate(api.vms, query):
        if vm.status.state == "down":
            if vm.tags.get("elas_manage"):
                for tag in vm.tags.list():
                    if tag.name[0:8] == "cluster_":
                        if options.verbosity >= 5:
                            print("Cleaning VM %s pinning to allow it to start on any host" % vm.name)
                        # If powered down, make the machine migratable again so it can start on any host
                        maquina = vm
                        maquina.placement_policy.host = params.Host()
                        maquina.placement_policy.affinity = "migratable"
                        maquina.update()
                if vm.tags.get("elas_start"):
                    if options.verbosity >= 5:
                        print("VM %s should be running, starting..." % vm.name)
                    # Start the machine: while it had host pinning it could not be autostarted by HA
                    vm.start()

    if not options.cluster:
        # Process each cluster of our RHEV-M
        for cluster in api.clusters.list():
            process_cluster(cluster)
    else:
        process_cluster(api.clusters.get(name=options.cluster))