There are a few parts to this implementation: QEMU, virglrenderer and virtio-gpu. The way it works is by letting the guest applications speak unmodified OpenGL to Mesa. But instead of Mesa handing commands over to the hardware, they are channeled through virtio-gpu on the guest to QEMU on the host.
QEMU then receives the raw graphics stack state (Gallium state) and interprets it using virglrenderer from the raw state into an OpenGL form, which can be executed as entirely normal OpenGL on the host machine.
The host OpenGL stack does not even have to be Mesa, and could for example be the proprietary nvidia stack.
听起来这个逻辑非常的长,guest端需要把渲染命令经过 opengl->mesa->guest_kernel->host_qemu_XXX->OpenGL->mesa->host_kernel->display
/*
 * virtio bus driver descriptor: the virtio core matches a device against
 * id_table and then invokes the probe/remove/config_changed callbacks.
 * (Excerpt — fields elided at the "…" marker.)
 */
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
…
.id_table = id_table,
.probe = virtio_gpu_probe,          /* called when a matching virtio-gpu device appears */
.remove = virtio_gpu_remove,
.config_changed = virtio_gpu_config_changed
};
/* Generates the file_operations table (open/mmap/ioctl/...) used for the DRM device node. */
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
/*
 * DRM driver descriptor for virtio-gpu.
 * (Excerpt — fields elided at the "..." marker.)
 */
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
/* feature flags advertised by this DRM driver (modesetting, GEM, PRIME sharing, render nodes, atomic KMS) */
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
/* dumb buffers: simple CPU-mappable scanout buffers for unaccelerated userspace */
.dumb_create = virtio_gpu_mode_dumb_create,
.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
/* PRIME buffer sharing: handle<->fd conversion uses the DRM core helpers */
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
/* driver-specific hooks for scatter/gather tables and CPU mappings of GEM objects */
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
.gem_free_object_unlocked = virtio_gpu_gem_free_object,
.gem_open_object = virtio_gpu_gem_object_open,
.gem_close_object = virtio_gpu_gem_object_close,
.fops = &virtio_gpu_driver_fops,
.ioctls = virtio_gpu_ioctls,            /* driver-private ioctl table */
.num_ioctls = DRM_VIRTIO_NUM_IOCTLS,
...
.patchlevel = DRIVER_PATCHLEVEL,
};
probe
virtio_gpu_probe
…
dev = drm_dev_alloc(&driver, &vdev->dev);
//allocate the drm_device and bind our driver descriptor to it
ret = drm_dev_init(dev, driver, parent);
//initialize the new DRM device; stores driver into dev->driver
if (!drm_core_init_complete) {
//bail out if the DRM core itself has not finished initializing [drm_drv.c]
DRM_ERROR("DRM core is not initialized\n");
return -ENODEV;
}
…
ret = drmm_add_action(dev, drm_dev_init_release, NULL);
//register a managed release action so drm_dev_init_release runs automatically on teardown
dev->anon_inode = drm_fs_inode_new();
//create the pseudo-filesystem inode backing this device
r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
//simple_pin_fs: mount the filesystem if it is not already mounted; 'mount' returns the vfsmount of the mounted fs, 'count' returns the pin count
inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
//allocate an anonymous inode on that superblock
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
}
//create the render node (e.g. /dev/dri/renderD128)
ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
//create the primary card node [/dev/dri/card0]
ret = drm_ht_create(&dev->map_hash, 12);
//create the map hash table — purpose not examined here, TODO confirm
drm_legacy_ctxbitmap_init(dev);
//legacy context bitmap setup; obsolete path, not examined
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
//GEM initialization — a key step
}
ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
//set the device's unique name; this is the name passed to modetest -M
if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
ret = virtio_gpu_pci_quirk(dev, vdev);
}
//apparently special-case handling when the underlying bus is PCI
ret = virtio_gpu_init(dev);
ret = drm_dev_register(dev, 0);
drm_fbdev_generic_setup(vdev->priv, 32);
…
open:
/**
 * virtio_gpu_driver_open - DRM ->open hook, runs once per opener of the node.
 * @dev: DRM device being opened
 * @file: per-open DRM file state; driver_priv is filled in on success
 *
 * Allocates the per-opener private data and reserves a context id from the
 * device-wide IDA. The stored ctx_id is the IDA value plus one (id 0 is
 * never handed to the host). Without host virgl 3D support there is nothing
 * to set up and the open succeeds with no private state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the negative
 * errno from ida_alloc().
 */
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *fpriv;
	int id;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* per-opener virt GPU context */
	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;
	mutex_init(&fpriv->context_lock);

	id = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (id < 0)
		goto err_free;

	fpriv->ctx_id = id + 1;		/* shift so ctx_id is never 0 */
	file->driver_priv = fpriv;
	return 0;

err_free:
	kfree(fpriv);
	return id;
}
先看和buffer相关的部分:
https://blog.csdn.net/hexiaolong2009/article/details/106532966
.dumb_create = virtio_gpu_mode_dumb_create,
.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
.gem_free_object_unlocked = virtio_gpu_gem_free_object,
.gem_open_object = virtio_gpu_gem_object_open,
.gem_close_object = virtio_gpu_gem_object_close,
.fops = &virtio_gpu_driver_fops,
对应的上层调用接口:
drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_req);
对应的驱动接口:
.dumb_create = virtio_gpu_mode_dumb_create,
ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj, &args->handle);
ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
ret = drm_gem_handle_create(file, &obj->base.base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&obj->base.base);
(1)创建 gem object
(2)创建 gem handle
(3)分配物理 buffer (也可以等到后面再分配)
看上去是把操作交给原始的那个drm
code:
<https://elixir.bootlin.com/linux/latest/source/drivers/gpu/drm/virtio/virtgpu_drv.c#L96>