cinder.scheduler.filter_scheduler.FilterScheduler.schedule_create_volume:

    def _get_weighted_candidates(self, context, request_spec,
                                 filter_properties=None):
        """Return a list of backends that meet required specs.

        Returned list is ordered by their fitness.
        """
        elevated = context.elevated()
        # Since Cinder is using mixed filters from Oslo and its own, which
        # take 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' will make both filters happy.
        volume_type = request_spec.get("volume_type")
        resource_type = volume_type if volume_type is not None else {}
        config_options = self._get_configuration_options()

        if filter_properties is None:
            filter_properties = {}
        self._populate_retry(filter_properties, request_spec)

        request_spec_dict = jsonutils.to_primitive(request_spec)

        filter_properties.update({'context': context,
                                  'request_spec': request_spec_dict,
                                  'config_options': config_options,
                                  'volume_type': volume_type,
                                  'resource_type': resource_type})

        self.populate_filter_properties(request_spec, filter_properties)

        # If multiattach is enabled on a volume, we need to add
        # multiattach to extra specs, so that the capability
        # filtering is enabled.
        multiattach = request_spec['volume_properties'].get('multiattach',
                                                            False)
        if multiattach and 'multiattach' not in resource_type.get(
                'extra_specs', {}):
            if 'extra_specs' not in resource_type:
                resource_type['extra_specs'] = {}

            resource_type['extra_specs'].update(
                multiattach='<is> True')
        # Revert volume consumed capacity if it's a rescheduled request
        retry = filter_properties.get('retry', {})
        if retry.get('backends', []):
            self.host_manager.revert_volume_consumed_capacity(
                retry['backends'][-1],
                request_spec['volume_properties']['size'])

        # Find our local list of acceptable backends by filtering and
        # weighing our options. We virtually consume resources on
        # it so subsequent selections can adjust accordingly.

        # Note: remember, we are using an iterator here. So only
        # traverse this list once.
        backends = self.host_manager.get_all_backend_states(elevated)

        # Filter local hosts based on requirements ...
        backends = self.host_manager.get_filtered_backends(backends,
                                                           filter_properties)
        if not backends:
            return []
LOG.debug("Filtered %s", backends) # weighted_backends = WeightedHost() ... the best # backend for the job. weighed_backends = self.host_manager.get_weighed_backends( backends, filter_properties) return weighed_backends def _schedule(self, context, request_spec, filter_properties=None): weighed_backends = self._get_weighted_candidates(context, request_spec, filter_properties) # When we get the weighed_backends, we clear those backends that don't # match the resource's backend (it could be assigend from group, # snapshot or volume). resource_backend = request_spec.get('resource_backend') if weighed_backends and resource_backend: resource_backend_has_pool = bool(utils.extract_host( resource_backend, 'pool')) # Get host name including host@backend#pool info from # weighed_backends. for backend in weighed_backends[::-1]: backend_id = ( backend.obj.backend_id if resource_backend_has_pool else utils.extract_host(backend.obj.backend_id) ) if backend_id != resource_backend: weighed_backends.remove(backend) if not weighed_backends: LOG.warning('No weighed backend found for volume ' 'with properties: %s', filter_properties['request_spec'].get('volume_type')) return None return self._choose_top_backend(weighed_backends, request_spec) def schedule_create_volume(self, context, request_spec, filter_properties): backend = self._schedule(context, request_spec, filter_properties)
        if not backend:
            raise exception.NoValidBackend(reason=_("No weighed backends "
                                                    "available"))

        backend = backend.obj
        volume_id = request_spec['volume_id']

        updated_volume = driver.volume_update_db(
            context, volume_id, backend.host, backend.cluster_name,
            availability_zone=backend.service['availability_zone'])
        self._post_select_populate_filter_properties(filter_properties,
                                                     backend)

        # context is not serializable
        filter_properties.pop('context', None)

        self.volume_rpcapi.create_volume(context, updated_volume,
                                         request_spec, filter_properties,
                                         allow_reschedule=True)
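
For orientation, here is a minimal sketch of the inputs this path consumes. It only includes the keys the methods above actually read (volume_id, volume_type, volume_properties, resource_backend); a real request spec is built by the API/RPC layer and carries many more fields, and every value below is an illustrative assumption.

    # Illustrative only: a pared-down request_spec/filter_properties pair
    # covering just the keys the scheduler code above touches. Values are
    # assumptions, not taken from a real deployment.
    request_spec = {
        'volume_id': '7f5f0b2e-1111-2222-3333-444444444444',  # assumed UUID
        'volume_type': {                                      # assumed type
            'name': 'gold',
            'extra_specs': {},
        },
        'volume_properties': {
            'size': 10,            # GiB; read when reverting consumed capacity
            'multiattach': True,   # triggers the '<is> True' extra-spec shim
        },
        # Optional pin to a specific backend, e.g. when the volume must land
        # with its group or source snapshot.
        'resource_backend': 'hostA@lvm#pool1',
    }

    filter_properties = {}  # the scheduler fills in context, retry info, etc.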
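
The resource_backend pruning in _schedule compares candidates either as full 'host@backend#pool' identifiers or only as 'host@backend', depending on whether the requested backend names a pool. Below is a standalone sketch of that comparison in plain Python; _extract_host is a simplified stand-in, and its exact behaviour relative to Cinder's utils.extract_host is an assumption.

    # Standalone sketch of the resource_backend pruning step.
    def _extract_host(backend_id, level=None):
        # Assumed behaviour: 'host@backend#pool' -> 'host@backend' by default,
        # or the pool name when level='pool' (None if no pool is present).
        host_backend, _, pool = backend_id.partition('#')
        if level == 'pool':
            return pool or None
        return host_backend


    def prune_backends(candidate_ids, resource_backend):
        # Compare at pool granularity only if the request itself names a pool.
        has_pool = bool(_extract_host(resource_backend, 'pool'))
        if has_pool:
            return [c for c in candidate_ids if c == resource_backend]
        return [c for c in candidate_ids
                if _extract_host(c) == resource_backend]


    # A request pinned to 'hostA@lvm' keeps any pool on that backend:
    print(prune_backends(['hostA@lvm#pool1', 'hostB@ceph#rbd'], 'hostA@lvm'))
    # ['hostA@lvm#pool1']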