欢迎您访问程序员文章站本站旨在为大家提供分享程序员计算机编程知识!
您现在的位置是: 首页

nova 挂载volume

程序员文章站 2022-05-11 14:05:13
...
nova 挂载volume的入口在E:\nova\nova\api\openstack\compute\volumes.py 中的VolumeAttachmentController类的create方法
 def create(self, req, server_id, body):
        """Attach a volume to an instance."""
        context = req.environ['nova.context']
        context.can(va_policies.POLICY_ROOT % 'create')

        volume_id = body['volumeAttachment']['volumeId']
        device = body['volumeAttachment'].get('device')
        tag = body['volumeAttachment'].get('tag')
# 根据 server_id 得到这个 instance
        instance = common.get_instance(self.compute_api, context, server_id)

        if instance.vm_state in (vm_states.SHELVED,
                                 vm_states.SHELVED_OFFLOADED):
            _check_request_version(req, '2.20', 'attach_volume',
                                   server_id, instance.vm_state)

        try:
            device = self.compute_api.attach_volume(context, instance,
                                                    volume_id, device, tag=tag)
        except (exception.InstanceUnknownCell,
可以看到最终调用 nova/compute/api.py 中的 attach_volume
def attach_volume(self, context, instance, volume_id, device=None,
                      disk_bus=None, device_type=None, tag=None):
        """Attach an existing volume to an existing instance.

        :param volume_id: id of the Cinder volume to attach
        :param device: requested device path, or None to auto-assign
        :param disk_bus: bus to expose the device on (e.g. virtio)
        :param device_type: guest device type (e.g. disk, cdrom)
        :param tag: optional device tag exposed to the guest
        :raises: InvalidDevicePath if the device path is malformed
        :raises: VolumeTaggedAttachToShelvedNotSupported if a tag is given
                 for a shelved-offloaded instance
        """
        # NOTE(vish): Fail fast if the device is not going to pass. This
        #             will need to be removed along with the test if we
        #             change the logic in the manager for what constitutes
        #             a valid device.
        if device and not block_device.match_device(device):
            raise exception.InvalidDevicePath(path=device)

        if instance.vm_state != vm_states.SHELVED_OFFLOADED:
            # Normal path: the attach is carried out on the compute host.
            return self._attach_volume(context, instance, volume_id, device,
                                       disk_bus, device_type, tag=tag)

        # Shelved-offloaded instance: there is no compute host yet, so the
        # attach happens locally in the API.
        # NOTE(artom) Local attach (to a shelved-offload instance) cannot
        # support device tagging because we have no way to call the compute
        # manager to check that it supports device tagging. In fact, we
        # don't even know which compute manager the instance will
        # eventually end up on when it's unshelved.
        if tag:
            raise exception.VolumeTaggedAttachToShelvedNotSupported()
        return self._attach_volume_shelved_offloaded(context,
                                                     instance,
                                                     volume_id,
                                                     device,
                                                     disk_bus,
                                                     device_type)
这个函数又调用_attach_volume
def _attach_volume(self, context, instance, volume_id, device,
                       disk_bus, device_type, tag=None):
        """Attach an existing volume to an existing instance.

        Kept as a separate step so the cells variant of the API can
        override it.

        :returns: the device name assigned to the attached volume
        """
        # Reserve a device name and create the BDM record up front; the
        # compute host completes it during the actual attach.
        bdm = self._create_volume_bdm(
            context, instance, device, volume_id, disk_bus=disk_bus,
            device_type=device_type, tag=tag)
        try:
            self._check_attach_and_reserve_volume(context, volume_id, instance)
            self.compute_rpcapi.attach_volume(context, instance, bdm)
        except Exception:
            # Undo the BDM record, then re-raise the original error.
            with excutils.save_and_reraise_exception():
                bdm.destroy()

        return bdm.device_name
首先调用_create_volume_bdm 创建volume_bdm,然后通过BlockDeviceMapping检查是否可以挂载,最后调用attach_volume
def _create_volume_bdm(self, context, instance, device, volume_id,
                           disk_bus, device_type, is_local_creation=False,
                           tag=None):
        """Create the BlockDeviceMapping record for a volume attach.

        :param is_local_creation: True when attaching to a
            shelved-offloaded instance, in which case the record is
            written directly instead of via the compute host.
        :returns: the created BlockDeviceMapping object
        """
        if not is_local_creation:
            # NOTE(vish): This is done on the compute host because we want
            #             to avoid a race where two devices are requested at
            #             the same time. When db access is removed from
            #             compute, the bdm will be created here and we will
            #             have to make sure that they are assigned atomically.
            return self.compute_rpcapi.reserve_block_device_name(
                context, instance, device, volume_id, disk_bus=disk_bus,
                device_type=device_type, tag=tag)

        # When the creation is done locally we can't specify the device
        # name as we do not have a way to check that the name specified is
        # a valid one. We leave the setting of that value to when the
        # actual attach happens on the compute manager.
        # NOTE(artom) Local attach (to a shelved-offload instance) cannot
        # support device tagging because we have no way to call the compute
        # manager to check that it supports device tagging. In fact, we
        # don't even know which compute manager the instance will
        # eventually end up on when it's unshelved.
        bdm = objects.BlockDeviceMapping(
            context=context,
            source_type='volume', destination_type='volume',
            instance_uuid=instance.uuid, boot_index=None,
            volume_id=volume_id,
            device_name=None, guest_format=None,
            disk_bus=disk_bus, device_type=device_type)
        bdm.create()
        return bdm
可以看到这里的_create_volume_bdm其实就是创建对应的表格,看BlockDeviceMapping的实现就会知道
@base.NovaObjectRegistry.register
class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
                         base.NovaObjectDictCompat):
    """Versioned object backing a block_device_mapping DB row.

    Only the creation path is shown here; the record maps a volume (or
    other block device) to an instance.
    """

    def _create(self, context, update_or_create=False):
        """Create the block device record in the database.

        In case the id field is set on the object, and if the instance is set
        raise an ObjectActionError. Resets all the changes on the object.

        Returns None

        :param context: security context used for database calls
        :param update_or_create: consider existing block devices for the
                instance based on the device name and swap, and only update
                the ones that match. Normally only used when creating the
                instance for the first time.
        """
        # BDM rows live in compute cells; refuse to write one from the
        # API cell.
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api':
            raise exception.ObjectActionError(
                    action='create',
                    reason='BlockDeviceMapping cannot be '
                           'created in the API cell.')

        # A set id means this object already corresponds to a DB row.
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        # 'instance' is a linked object, not a column; it must not leak
        # into the DB update dict.
        if 'instance' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='instance assigned')

        # True when update-or-create was requested, otherwise None —
        # presumably so the cells sync below distinguishes "plain create"
        # from "update or create" (TODO confirm against cells RPC API).
        cells_create = update_or_create or None
        if update_or_create:
            db_bdm = db.block_device_mapping_update_or_create(
                    context, updates, legacy=False)
        else:
            db_bdm = db.block_device_mapping_create(
                    context, updates, legacy=False)

        # Refresh this object from the row just written.
        self._from_db_object(context, self, db_bdm)
        # NOTE(alaski): bdms are looked up by instance uuid and device_name
        # so if we sync up with no device_name an entry will be created that
        # will not be found on a later update_or_create call and a second bdm
        # create will occur.
        if cell_type == 'compute' and db_bdm.get('device_name') is not None:
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_update_or_create_at_top(
                    context, self, create=cells_create)

    @base.remotable
    def create(self):
        """Create the record in the database; see _create()."""
        self._create(self._context)
可以看到BlockDeviceMapping 类中的create 最终会写数据库

    def _check_attach_and_reserve_volume(self, context, volume_id, instance):
        """Verify the volume may be attached and put a hold on it.

        Fetches the volume from Cinder, checks its availability zone is
        compatible with the instance, then reserves it so no concurrent
        request can claim it while the attach is in flight.

        :returns: the volume fetched from Cinder
        """
        vol_api = self.volume_api
        volume = vol_api.get(context, volume_id)
        vol_api.check_availability_zone(context, volume, instance=instance)
        vol_api.reserve_volume(context, volume_id)
        return volume
_check_attach_and_reserve_volume 分别调用volume_api的check_availability_zone 检查zone是否可以挂载volume,然后再调用预留要挂载的volume,
这两个函数最终通过 client 调用到 cinder 的 API 来实现
继续看 nova/compute/api.py 中的 _attach_volume 调用的 attach_volume,这个函数最终在 nova/compute/manager.py 中实现
def attach_volume(self, context, instance, bdm):
        """Attach a volume to an instance.

        :param bdm: BlockDeviceMapping object for the volume to attach
        """
        # Fix: the pasted snippet began with 'ef attach_volume' — the 'd'
        # of 'def' was lost, making the block a syntax error.
        driver_bdm = driver_block_device.convert_volume(bdm)

        # Serialize attaches per instance so two concurrent requests
        # cannot race on device assignment.
        @utils.synchronized(instance.uuid)
        def do_attach_volume(context, instance, driver_bdm):
            try:
                return self._attach_volume(context, instance, driver_bdm)
            except Exception:
                # Clean up the BDM record, then re-raise the failure.
                with excutils.save_and_reraise_exception():
                    bdm.destroy()

        do_attach_volume(context, instance, driver_bdm)
继续调用_attach_volume
def _attach_volume(self, context, instance, bdm):
        context = context.elevated()
        LOG.info('Attaching volume %(volume_id)s to %(mountpoint)s',
                 {'volume_id': bdm.volume_id,
                  'mountpoint': bdm['mount_device']},
                 instance=instance)
        compute_utils.notify_about_volume_attach_detach(
            context, instance, self.host,
            action=fields.NotificationAction.VOLUME_ATTACH,
            phase=fields.NotificationPhase.START,
            volume_id=bdm.volume_id)
        try:
            bdm.attach(context, instance, self.volume_api, self.driver,
                       do_driver_attach=True)
这里的核心是调用bdm.attach。这就会调用到nova/virt/block_device.py 中不同类的实现,这里假定用的是DriverVolumeBlockDevice中的attach
def attach(self, context, instance, volume_api, virt_driver,
               do_driver_attach=False, **kwargs):


        connector = virt_driver.get_volume_connector(instance)
        connection_info = volume_api.initialize_connection(context,
                                                           volume_id,
                                                           connector)
		if 'serial' not in connection_info:
            connection_info['serial'] = self.volume_id
        self._preserve_multipath_id(connection_info)

        # If do_driver_attach is False, we will attach a volume to an instance
        # at boot time. So actual attach is done by instance creation code.
        if do_driver_attach:
            encryption = encryptors.get_encryption_metadata(
                context, volume_api, volume_id, connection_info)

            try:
                virt_driver.attach_volume(
                        context, connection_info, instance,
                        self['mount_device'], disk_bus=self['disk_bus'],
                        device_type=self['device_type'], encryption=encryption)
      
	  
首先看get_volume_connector,假定我们用的是libvirt。E:\nova\nova\virt\libvirt\driver.py
    def get_volume_connector(self, instance):
        """Return the os-brick connector properties for this compute host.

        The result (IP, initiator, wwpns, ...) identifies this host to the
        storage backend when a connection is initialized.
        """
        return connector.get_connector_properties(
            utils.get_root_helper(),
            CONF.my_block_storage_ip,
            CONF.libvirt.volume_use_multipath,
            enforce_multipath=True,
            host=CONF.host)
E:\nova\nova\tests\unit\virt\libvirt\fake_os_brick_connector.py
def get_connector_properties(root_helper, my_ip, multipath, enforce_multipath,
                             host=None):
    """Fake os-brick."""
    # Canned connector properties describing the (fake) compute host.
    return {
        'ip': my_ip,
        'host': host,
        'initiator': ISCSIConnector('').get_initiator(),
        'wwpns': ['100010604b019419'],
        'wwnns': ['200010604b019419'],
        'multipath': multipath,
        'platform': 'x86_64',
        'os_type': 'linux2',
    }
可以看到这里主要是返回计算节点的信息,如ip,操作系统等
 def initialize_connection(self, context, volume_id, connector):
        try:
            connection_info = cinderclient(
                context).volumes.initialize_connection(volume_id, connector)
            connection_info['connector'] = connector
            return connection_info
而这里的initialize_connection 其实是调用cinder的initialize_connection。
回到nova/virt/block_device.py 中的attach调用virt_driver.attach_volume 来最后挂载volume
其源码在E:\nova\nova\virt\libvirt\driver.py
def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):

        guest.attach_device(conf, persistent=True, live=live)
可见最后调用guest.attach_device 来挂载volume
其源码在E:\nova\nova\virt\libvirt\guest.py
    def attach_device(self, conf, persistent=False, live=False):
        """Attach a device to the guest.

        :param conf: A LibvirtConfigObject of the device to attach
        :param persistent: A bool to indicate whether the change is
                           persistent or not
        :param live: A bool to indicate whether it affect the guest
                     in running state
        """
        flags = 0
        if persistent:
            flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG
        if live:
            flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

        xml = conf.to_xml()
        # to_xml() may return bytes on Python 3; libvirt expects text.
        if six.PY3 and isinstance(xml, six.binary_type):
            xml = xml.decode('utf-8')

        LOG.debug("attach device xml: %s", xml)
        self._domain.attachDeviceFlags(xml, flags=flags)
可见这里最后调用类似 virsh attach-device 命令把设备挂载到虚拟机中