openstack-nova-虚拟机创建流程以及源码分析(二)

openstack 虚机创建流程以及源码分析(二)

基于openstack stein

前面调度完主机后开始rpc通知计算节点启动

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
#nova/conductor/manager.py #
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True,
request_spec=None, host_lists=None):
.......................
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host.service_host, image=image,
request_spec=local_reqspec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host.nodename,
limits=host.limits, host_list=host_list)
1
2
3
4
5
6
7
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None,
host_list=None):
.........
cctxt.cast(ctxt, 'build_and_run_instance', **kwargs)

自此,消息通过 cast 异步传给了 compute 节点

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
#nova/compute/manager.py
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None, host_list=None):

@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
# NOTE(danms): We grab the semaphore with the instance uuid
# locked because we could wait in line to build this instance
# for a while and we want to make sure that nothing else tries
# to do anything with this instance while we wait.
with self._build_semaphore:
try:
result = self._do_build_and_run_instance(*args, **kwargs)
.............................
utils.spawn_n(_locked_do_build_and_run_instance,
context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups,
block_device_mapping, node, limits, host_list)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None, host_list=None):

try:
LOG.debug('Starting instance...', instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = 'Instance disappeared before build.'
LOG.debug(msg, instance=instance)
return build_results.FAILED
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return build_results.FAILED

# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)

if limits is None:
limits = {}

if node is None:
node = self._get_nodename(instance, refresh=True)

try:
with timeutils.StopWatch() as timer:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits,
filter_properties, request_spec)
LOG.info('Took %0.2f seconds to build instance.',
timer.elapsed(), instance=instance)
return build_results.ACTIVE
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits, filter_properties,
request_spec=None):
..............................
try:
scheduler_hints = self._get_scheduler_hints(filter_properties,
request_spec)
with self.rt.instance_claim(context, instance, node, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
scheduler_hints)
image_meta = objects.ImageMeta.from_dict(image)

LOG.debug('image_meta:%s, image:%s' % (image_meta, image))
# verify image checksum
if CONF.enable_verify_image_md5sum:
self.driver.verify_image_md5sum(context, instance, image_meta.id)

request_group_resource_providers_mapping = \
self._get_request_group_mapping(request_spec)

with self._build_resources(context, instance,
requested_networks, security_groups, image_meta,
block_device_mapping,
request_group_resource_providers_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
# NOTE(JoshNang) This also saves the changes to the
# instance from _allocate_network_async, as they aren't
# saved in that function to prevent races.
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
allocs = resources['allocations']
LOG.debug('Start spawning the instance on the hypervisor.',
instance=instance)
with timeutils.StopWatch() as timer:
LOG.debug("_build_and_run_instance self.driver.spawn "
"image_meta:%s, block_device_info:%s"
% (image_meta, block_device_info))
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
allocs, network_info=network_info,
block_device_info=block_device_info,
destroy_disks_on_failure=True)
LOG.info('Took %0.2f seconds to spawn the instance on '
'the hypervisor.', timer.elapsed(),
instance=instance)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
def _build_resources(self, context, instance, requested_networks,
security_groups, image_meta, block_device_mapping,
resource_provider_mapping):
try:
LOG.debug('Start building networks asynchronously for instance.',
instance=instance)
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups,
resource_provider_mapping)
..................
try:
# Perform any driver preparation work for the driver.
self.driver.prepare_for_spawn(instance)

# Depending on a virt driver, some network configuration is
# necessary before preparing block devices.
self.driver.prepare_networks_before_block_device_mapping(
instance, network_info)

# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(instance, image_meta,
block_device_mapping)

LOG.debug('Start building block device mappings for instance.',
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()

block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
class DriverVolImageBlockDevice(DriverVolumeBlockDevice):
    """Block device whose backing volume is created from a Glance image.

    Used on the boot-from-image-to-volume path: if the BDM has no volume
    yet, one is created from ``image_id`` via the volume API before
    delegating to the regular volume attach flow in the parent class.
    """

    # This driver class handles BDMs whose source_type is 'image'.
    _valid_source = 'image'
    # Expose the underlying BDM's image_id as an attribute on this proxy.
    _proxy_as_attr_inherited = set(['image_id'])

    def attach(self, context, instance, volume_api,
               virt_driver, wait_func=None):
        # Lazily create the volume from the image on first attach only;
        # if volume_id is already set, we go straight to the parent attach.
        if not self.volume_id:
            av_zone = _get_volume_create_az_value(instance)
            vol = volume_api.create(context, self.volume_size,
                                    '', '', image_id=self.image_id,
                                    volume_type=self.volume_type,
                                    availability_zone=av_zone)
            # NOTE(review): wait_func presumably blocks until the new
            # volume is usable — confirm against the caller that supplies it.
            if wait_func:
                self._call_wait_func(context, wait_func, volume_api, vol['id'])

            self.volume_id = vol['id']

        # TODO(mriedem): Create an attachment to reserve the volume and
        # make us go down the new-style attach flow.

        super(DriverVolImageBlockDevice, self).attach(
            context, instance, volume_api, virt_driver)

先将 instance 的状态保存为 BUILDING,然后在计时 StopWatch 下调用 self._build_and_run_instance,build 虚机时会对镜像等进行相关检查。接着调用 self._build_resources:它先调用 self._build_networks_for_instance 异步准备分配网络(并将 instance 的 task_state 更改为 networking),然后再将 task_state 更改为 block_device_mapping 状态,并调用 self._prep_block_device 开始调用 cinder API 创建 volume 并 attach,同时把虚机资源信息上报到 placement。至此准备工作基本完成,将 task_state 变更为 spawning,最后调用 libvirt driver 的 spawn 创建虚机。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
#nova/virt/libvirt/driver.py
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None, destroy_disks_on_failure=False):
.............
self._create_image(context, instance, disk_info['mapping'],
injection_info=injection_info,
block_device_info=block_device_info)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
mdevs=mdevs)
self._create_domain_and_network(
context, xml, instance, network_info,
block_device_info=block_device_info,
post_xml_callback=gen_confdrive,
destroy_disks_on_failure=destroy_disks_on_failure)

先调用 _create_image 创建镜像磁盘,再准备 guest 的 XML 信息,然后调用 _create_domain_and_network 创建 libvirt guest 以及网络 vif 等信息:执行 plug_vifs,并根据所选的防火墙 driver 配置基础的 filter(basic filtering:MAC、IP、ARP 欺骗防护),以及准备虚机的基础防火墙 filter(如放行 DHCP 的 UDP 67/68 端口等),最后直接调用 libvirt 的 driver 启动虚机。