diff --git a/Android.bp b/Android.bp index 60e85d5..9f88d7e 100644 --- a/Android.bp +++ b/Android.bp @@ -49,6 +49,7 @@ filegroup { "virtgpu.c", "virtgpu_cross_domain.c", "virtgpu_virgl.c", + "intel_device.c", ], } diff --git a/cros_gralloc/cros_gralloc_driver.cc b/cros_gralloc/cros_gralloc_driver.cc index 5136a88..cf28072 100644 --- a/cros_gralloc/cros_gralloc_driver.cc +++ b/cros_gralloc/cros_gralloc_driver.cc @@ -15,7 +15,9 @@ #include #include "../drv_priv.h" +#include "../intel_device.h" #include "../util.h" +#include "drv.h" // Constants taken from pipe_loader_drm.c in Mesa @@ -95,17 +97,6 @@ cros_gralloc_driver *cros_gralloc_driver::get_instance() return &s_instance; } -#define DRV_INIT(drv, type, idx) \ - if (drv) { \ - if (drv_init(drv, type)) { \ - drv_loge("Failed to init driver %d\n", idx); \ - int fd = drv_get_fd(drv); \ - drv_destroy(drv); \ - close(fd); \ - drv = nullptr; \ - } \ - } - #define DRV_DESTROY(drv) \ if (drv) { \ int fd = drv_get_fd(drv); \ @@ -121,9 +112,12 @@ int32_t cros_gralloc_driver::reload() char const *str = "%s/renderD%d"; char *node; + if (gpu_grp_type_ & GPU_GRP_TYPE_HAS_VIRTIO_GPU_IVSHMEM_BIT) { + return 0; + } // Max probe two ivshm node, the first one is used for screen cast. 
- for (uint32_t i = drv_num_; i < drv_num_ + IVSH_DEVICE_NUM; i++) { - if (asprintf(&node, str, DRM_DIR_NAME, i) < 0) + for (int i = 6; i >= 0; --i) { + if (asprintf(&node, str, DRM_DIR_NAME, DRM_RENDER_NODE_START + i) < 0) continue; fd = open(node, O_RDWR, 0); @@ -138,31 +132,28 @@ int32_t cros_gralloc_driver::reload() } drmFreeVersion(version); - drv_ivshmem_ = drv_create(fd); - if (!drv_ivshmem_) { - drv_loge("Failed to create driver\n"); - close(fd); - continue; - } - - if (drv_init(drv_ivshmem_, gpu_grp_type_)) { - drv_loge("Failed to init driver\n"); - DRV_DESTROY(drv_ivshmem_) - continue; - } - if (drv_virtgpu_is_ivshm(drv_ivshmem_)) { + if (!isVirtioGpuPciDevice(fd)) { drv_logi("New added node is virtio-ivishmem node"); + gpu_grp_type_ |= GPU_GRP_TYPE_HAS_VIRTIO_GPU_IVSHMEM_BIT; + struct driver *drv = drv_create(fd, gpu_grp_type_); + if (!drv) { + drv_loge("Failed to create driver\n"); + close(fd); + continue; + } + drivers_[GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX] = drv; + drv_ivshmem_ = drv; return 0; } else { drv_logi("New added node is NOT virtio-ivishmem node"); - DRV_DESTROY(drv_ivshmem_) + close(fd); continue; } } return -ENODEV; } -cros_gralloc_driver::cros_gralloc_driver() +cros_gralloc_driver::cros_gralloc_driver(): drivers_(GPU_GRP_TYPE_NR, nullptr) { /* * Create a driver from render nodes first, then try card @@ -180,32 +171,15 @@ cros_gralloc_driver::cros_gralloc_driver() uint32_t j; char *node; int fd; + int fallback_fd = -1; drmVersionPtr version; const int render_num = 10; - const int name_length = 50; - int node_fd[render_num]; - char *node_name[render_num] = {}; - int availabe_node = 0; - int virtio_node_idx = -1; - int ivshm_node_idx = -1; - int renderer_idx = -1; - int video_idx = -1; + std::vector driver_fds{GPU_GRP_TYPE_NR, -1}; char buf[PROP_VALUE_MAX]; property_get("ro.product.device", buf, "unknown"); mt8183_camera_quirk_ = !strncmp(buf, "kukui", strlen("kukui")); - // destroy drivers if exist before re-initializing them - if 
(drv_video_ != drv_render_) - DRV_DESTROY(drv_video_) - if (drv_kms_ != drv_render_) - DRV_DESTROY(drv_kms_) - DRV_DESTROY(drv_render_) - - if (drv_ivshmem_) { - DRV_DESTROY(drv_ivshmem_) - } - for (uint32_t i = min_render_node; i < max_render_node; i++) { if (asprintf(&node, render_nodes_fmt, DRM_DIR_NAME, i) < 0) continue; @@ -230,136 +204,98 @@ cros_gralloc_driver::cros_gralloc_driver() } // hit any of undesired render node - if (j < ARRAY_SIZE(undesired)) + if (j < ARRAY_SIZE(undesired)) { + drmFreeVersion(version); + close(fd); continue; - - if (!strcmp(version->name, "virtio_gpu")) { - if (virtio_node_idx == -1) - virtio_node_idx = availabe_node; - else if (ivshm_node_idx == -1) - ivshm_node_idx = availabe_node; } - if (!strcmp(version->name, "i915")) { - // Prefer i915 for performance consideration. - // - // TODO: We might have multiple i915 devices in the system and in - // this case we are effectively using the one with largest card id - // which is normally the dGPU VF or dGPU passed through device. - renderer_idx = availabe_node; - // Use first available i915 node for video bo alloc - if (video_idx == -1) - video_idx = availabe_node; + if (fallback_fd == -1) + fallback_fd = fd; + + // We have several kinds of virtio-GPU devices: + // + // * virtio-GPU supporting blob feature: normal case implemented by ACRN device + // model in SOS. This kind of device is able to import GEM objects from other + // devices such as Intel GPUs. Hence, for the sake of performance, we would like + // to allocate scan-out buffers from Intel GPUs because in this way 1) the buffers + // are allowed to reside in local memory if the rendering GPU is a discrete one, + // 2) it's easier to support tiled buffers. Depending on whether allow-p2p feature + // is enabled or not, the devices of this kind can be divided into two subclasses: + // + // * If allow-p2p is not supported, the (physical) display is backed by iGPU; + // * Otherwise, the display is backed by dGPU. 
+ // + // The backing display matters because 1) dGPU scans out buffers if and only if + // the buffers reside in local memory, whereas iGPU scans out system memory + // buffers only, 2) iGPU and dGPU support different sets of tiling formats, which + // is a headache if we render with dGPU and display with iGPU and vice versa. + // + // * virtio-GPU not supporting blob feature: QNX hypervisor case and Redhat's use + // case. Being incapable of importing external buffers, scan-out buffers are + // required to be allocated by the virtio-GPU itself. + // + // * virtio-GPU backed by inter-VM shared-memory (ivshmem): inter-VM screen cast use + // case. This kind doesn't support importing external buffers either, and it's + // needed only when the buffers shall be shared for casting. + int gpu_grp_type_idx = get_gpu_type(fd); + + if (gpu_grp_type_idx != -1 && + !(gpu_grp_type_ & (1ull << gpu_grp_type_idx))) { + gpu_grp_type_ |= (1ull << gpu_grp_type_idx); + driver_fds[gpu_grp_type_idx] = fd; + } else if (fd != fallback_fd) { + close(fd); } - - node_fd[availabe_node] = fd; - int len = snprintf(NULL, 0, "%s", version->name); - node_name[availabe_node] = (char *)malloc(len + 1); - strncpy(node_name[availabe_node], version->name, len + 1); - availabe_node++; - drmFreeVersion(version); } - drv_num_ = DRM_RENDER_NODE_START + availabe_node; - - if (availabe_node > 0) { - if ((renderer_idx != -1) && (video_idx != -1) && (video_idx != renderer_idx)) - gpu_grp_type_ |= GPU_TYPE_DUAL_IGPU_DGPU; - // if no i915 node found - if (renderer_idx == -1) { - if (virtio_node_idx != -1) { - // Fallback to virtio-GPU - video_idx = renderer_idx = virtio_node_idx; - } else { - drv_loge("Weird scenario! Neither of i915 nor virtio-GPU device is" - " found. 
Use the first deivice.\n"); - video_idx = renderer_idx = 0; - } - } - - // Create drv - if (!(drv_render_ = drv_create(node_fd[renderer_idx]))) { - drv_loge("Failed to create driver for the render device with card id %d\n", - renderer_idx); - close(node_fd[renderer_idx]); +restart: + for (int i = 0; i < GPU_GRP_TYPE_NR; ++i) { + if (!(gpu_grp_type_ & (1ull << i))) + continue; + if (drivers_[i] != nullptr) { + DRV_DESTROY(drivers_[i]); } - if (video_idx != renderer_idx) { - drv_video_ = drv_create(node_fd[video_idx]); - if (!drv_video_) { - drv_loge("Failed to create driver for the video device with card id %d\n", - video_idx); - close(node_fd[video_idx]); - } - } else { - drv_video_ = drv_render_; - if (!drv_video_) - drv_loge("Failed to create driver for the video device with card id %d\n", - video_idx); + if (driver_fds[i] == fallback_fd) + fallback_fd = -1; + struct driver *drv = drv_create(driver_fds[i], gpu_grp_type_); + if (!drv) { + drv_loge("failed to init minigbm driver on device of type %d\n", i); + close(driver_fds[i]); + driver_fds[i] = -1; + gpu_grp_type_ &= ~(1ull << i); + goto restart; } + drivers_[i] = drv; + } - if ((virtio_node_idx != -1) && (virtio_node_idx != renderer_idx)) { - drv_kms_ = drv_create(node_fd[virtio_node_idx]); - if (!drv_kms_) { - drv_loge("Failed to create driver for the virtio-gpu device with card id %d\n", - virtio_node_idx); - close(node_fd[virtio_node_idx]); - drv_kms_ = drv_render_; - } - } else { - drv_kms_ = drv_render_; - } + if (fallback_fd >= 0) + close(fallback_fd); - // Init drv - DRV_INIT(drv_render_, gpu_grp_type_, renderer_idx) - if (video_idx != renderer_idx) - DRV_INIT(drv_video_, gpu_grp_type_, video_idx) - if ((virtio_node_idx != -1) && (virtio_node_idx != renderer_idx)) - DRV_INIT(drv_kms_, gpu_grp_type_, virtio_node_idx) - if (drv_kms_ && (virtio_node_idx != renderer_idx) && (drv_kms_ != drv_render_)) { - bool virtiopic_with_blob = drv_virtpci_with_blob(drv_kms_); - if (virtiopic_with_blob) { - //igpu SRIOV 
or dGPU SRIOV case - drv_logi("Virtio gpu device with blob\n"); - if ((drv_kms_ != drv_render_) && drv_kms_) - DRV_DESTROY(drv_kms_) - drv_kms_ = drv_render_; - } else if (drv_is_dgpu(drv_video_) && drv_virtgpu_is_ivshm(drv_kms_)){ - //is dgpu passthrough + ivshm case - drv_logi("dGPU with Virtio ivshm\n"); - drv_ivshmem_ = drv_kms_; - drv_kms_ = drv_render_; - if (ivshm_node_idx != -1) { - close(node_fd[ivshm_node_idx]); - ivshm_node_idx = -1; - } - } else { - // is QNX or redhat case - drv_logi("Virtio ivshm or no blob\n"); - } - } - if (ivshm_node_idx != -1) { - if (!(drv_ivshmem_ = drv_create(node_fd[ivshm_node_idx]))) { - drv_loge("Failed to create driver for the ivshm device with card id %d\n", - ivshm_node_idx); - close(node_fd[ivshm_node_idx]); - } else { - DRV_INIT(drv_ivshmem_, gpu_grp_type_, ivshm_node_idx) - if (drv_virtgpu_is_ivshm(drv_ivshmem_)) { - drv_logi("Node is virtio-ivishmem node"); - } else { - drv_logi("Node is NOT virtio-ivishmem node"); - DRV_DESTROY(drv_ivshmem_) - } - } + if (gpu_grp_type_ == 0) { + drv_loge("No known device found!\n"); + if (fallback_fd >= 0) { + drv_fallback_ = drv_create(fallback_fd, gpu_grp_type_); + drv_render_ = drv_kms_ = drv_video_ = drv_fallback_; } + return; } - for (int i = 0; i < availabe_node; i++) { - free(node_name[i]); - if ((i != renderer_idx) && (i != video_idx) && (i != virtio_node_idx) && (i != ivshm_node_idx)) { - close(node_fd[i]); - } + int idx = select_render_driver(gpu_grp_type_); + if (idx != -1) { + drv_render_ = drivers_[idx]; + } + idx = select_kms_driver(gpu_grp_type_); + if (idx != -1) { + drv_kms_ = drivers_[idx]; + } + idx = select_video_driver(gpu_grp_type_); + if (idx != -1) { + drv_video_ = drivers_[idx]; + } + if (gpu_grp_type_ & GPU_GRP_TYPE_HAS_VIRTIO_GPU_IVSHMEM_BIT) { + drv_ivshmem_ = drivers_[GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX]; } } @@ -367,14 +303,16 @@ cros_gralloc_driver::~cros_gralloc_driver() { buffers_.clear(); handles_.clear(); + if (gpu_grp_type_ == 0) { + 
DRV_DESTROY(drv_fallback_); + return; + } - if (drv_video_ != drv_render_) - DRV_DESTROY(drv_video_) - if (drv_kms_ != drv_render_) - DRV_DESTROY(drv_kms_) - DRV_DESTROY(drv_render_) - if (drv_ivshmem_) - DRV_DESTROY(drv_ivshmem_) + for (int i = 0; i < GPU_GRP_TYPE_NR; ++i) { + if (gpu_grp_type_ & (1ull << i)) { + DRV_DESTROY(drivers_[i]); + } + } } bool cros_gralloc_driver::is_initialized() @@ -395,12 +333,37 @@ bool cros_gralloc_driver::is_video_format(const struct cros_gralloc_buffer_descr return true; } -bool cros_gralloc_driver::use_ivshm_drv(const struct cros_gralloc_buffer_descriptor *descriptor) +bool cros_gralloc_driver::use_ivshm_drv(const struct cros_gralloc_buffer_descriptor *descriptor, bool retain) { - if ((descriptor->use_flags & BO_USE_SCANOUT) && - (descriptor->width == IVSH_WIDTH) && (descriptor->height == IVSH_HEIGHT)) - return true; - return false; + // To keep original code logic, when calling retain API, we don't have SCANOUT flag + if (retain) { + return (descriptor->width == IVSH_WIDTH) && + (descriptor->height == IVSH_HEIGHT); + } + + return (descriptor->width == IVSH_WIDTH) && + (descriptor->height == IVSH_HEIGHT) && + (descriptor->use_flags & BO_USE_SCANOUT); +} + +struct driver *cros_gralloc_driver::select_driver(const struct cros_gralloc_buffer_descriptor *descriptor, bool retain) +{ + if (use_ivshm_drv(descriptor, retain)) { + // Give it a chance to re-scan devices. + if (drv_ivshmem_ == nullptr) { + reload(); + } + if (drv_ivshmem_) { + return drv_ivshmem_; + } + } + if (is_video_format(descriptor)) { + return drv_video_; + } + if (descriptor->use_flags & BO_USE_SCANOUT) { + return drv_kms_; + } + return drv_render_; } bool cros_gralloc_driver::get_resolved_format_and_use_flags( @@ -411,11 +374,7 @@ bool cros_gralloc_driver::get_resolved_format_and_use_flags( uint64_t resolved_use_flags; struct combination *combo; - struct driver *drv = is_video_format(descriptor) ? 
drv_video_ : drv_render_; - if (drv_ivshmem_ && use_ivshm_drv(descriptor)) { - drv = drv_ivshmem_; - } else if ((descriptor->use_flags & BO_USE_SCANOUT) && !(is_video_format(descriptor))) - drv = drv_kms_; + struct driver *drv = select_driver(descriptor); if (mt8183_camera_quirk_ && (descriptor->use_flags & BO_USE_CAMERA_READ) && !(descriptor->use_flags & BO_USE_SCANOUT) && @@ -459,15 +418,7 @@ bool cros_gralloc_driver::is_supported(const struct cros_gralloc_buffer_descript { uint32_t resolved_format; uint64_t resolved_use_flags; - struct driver *drv = is_video_format(descriptor) ? drv_video_ : drv_render_; - if (!drv_ivshmem_ && (descriptor->width == IVSH_WIDTH) && (descriptor->height == IVSH_HEIGHT)) { - if (reload()) { - } - } - if (drv_ivshmem_ && use_ivshm_drv(descriptor)) { - drv = drv_ivshmem_; - } else if ((descriptor->use_flags & BO_USE_SCANOUT) && !(is_video_format(descriptor))) - drv = drv_kms_; + struct driver *drv = select_driver(descriptor); uint32_t max_texture_size = drv_get_max_texture_2d_size(drv); if (!get_resolved_format_and_use_flags(descriptor, &resolved_format, &resolved_use_flags)) return false; @@ -514,13 +465,7 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto struct cros_gralloc_handle *hnd; std::unique_ptr buffer; - struct driver *drv; - - drv = is_video_format(descriptor) ? drv_video_ : drv_render_; - if (drv_ivshmem_ && use_ivshm_drv(descriptor)) { - drv = drv_ivshmem_; - } else if ((descriptor->use_flags & BO_USE_SCANOUT) && !(is_video_format(descriptor))) - drv = drv_kms_; + struct driver *drv = select_driver(descriptor); if (!get_resolved_format_and_use_flags(descriptor, &resolved_format, &resolved_use_flags)) { ALOGE("Failed to resolve format and use_flags."); @@ -674,11 +619,7 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle) .drm_format = hnd->format, .use_flags = hnd->use_flags, }; - drv = is_video_format(&descriptor) ? 
drv_video_ : drv_render_; - if (drv_ivshmem_ && (hnd->width == IVSH_WIDTH) && (hnd->height == IVSH_HEIGHT)) { - drv = drv_ivshmem_; - } else if ((hnd->use_flags & BO_USE_SCANOUT) && !(is_video_format(&descriptor))) - drv = drv_kms_; + drv = select_driver(&descriptor, true); auto hnd_it = handles_.find(hnd); if (hnd_it != handles_.end()) { @@ -987,3 +928,73 @@ void cros_gralloc_driver::with_each_buffer( for (const auto &pair : buffers_) function(pair.second.get()); } + +int cros_gralloc_driver::select_render_driver(uint64_t gpu_grp_type) +{ + if (gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_DGPU_BIT) { + return GPU_GRP_TYPE_INTEL_DGPU_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_IGPU_BIT) { + return GPU_GRP_TYPE_INTEL_IGPU_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_BLOB_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_P2P_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_BLOB_P2P_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_NO_BLOB_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_NO_BLOB_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_IVSHMEM_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX; + } + return -1; +} + +int cros_gralloc_driver::select_kms_driver(uint64_t gpu_grp_type) +{ + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_NO_BLOB_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_NO_BLOB_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_DGPU_BIT) { + return GPU_GRP_TYPE_INTEL_DGPU_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_IGPU_BIT) { + return GPU_GRP_TYPE_INTEL_IGPU_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_BLOB_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_P2P_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_BLOB_P2P_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_IVSHMEM_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX; + } + return -1; +} + +int 
cros_gralloc_driver::select_video_driver(uint64_t gpu_grp_type) +{ + if (gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_IGPU_BIT) { + return GPU_GRP_TYPE_INTEL_IGPU_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_DGPU_BIT) { + return GPU_GRP_TYPE_INTEL_DGPU_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_BLOB_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_P2P_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_BLOB_P2P_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_NO_BLOB_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_NO_BLOB_IDX; + } + if (gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_IVSHMEM_BIT) { + return GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX; + } + return -1; +} + diff --git a/cros_gralloc/cros_gralloc_driver.h b/cros_gralloc/cros_gralloc_driver.h index 8efc9fa..cf1ed5b 100644 --- a/cros_gralloc/cros_gralloc_driver.h +++ b/cros_gralloc/cros_gralloc_driver.h @@ -16,6 +16,8 @@ #include #include +#include + #if ANDROID_API_LEVEL >= 31 && defined(HAS_DMABUF_SYSTEM_HEAP) #include #endif @@ -62,7 +64,12 @@ class cros_gralloc_driver ~cros_gralloc_driver(); bool is_initialized(); bool is_video_format(const struct cros_gralloc_buffer_descriptor *descriptor); - bool use_ivshm_drv(const struct cros_gralloc_buffer_descriptor *descriptor); + bool use_ivshm_drv(const struct cros_gralloc_buffer_descriptor *descriptor, bool retain); + static int select_render_driver(uint64_t gpu_grp_type); + static int select_kms_driver(uint64_t gpu_grp_type); + static int select_video_driver(uint64_t gpu_grp_type); + void set_gpu_grp_type(); + struct driver *select_driver(const struct cros_gralloc_buffer_descriptor *descriptor, bool retain = false); int32_t reload(); cros_gralloc_buffer *get_buffer(cros_gralloc_handle_t hnd); bool @@ -97,8 +104,10 @@ class cros_gralloc_driver // the drv_ivshmem_ is used to allocate scanout buffer with // certain resolution(screen cast). 
struct driver *drv_ivshmem_ = nullptr; - uint32_t drv_num_ = 0; - uint64_t gpu_grp_type_ = GPU_TYPE_NORMAL; + struct driver *drv_fallback_ = nullptr; + // This owns the drivers. + std::vector drivers_; + uint64_t gpu_grp_type_ = 0; std::mutex mutex_; std::unordered_map> buffers_; std::unordered_map handles_; diff --git a/drv.c b/drv.c index fdf2478..b3abfb4 100644 --- a/drv.c +++ b/drv.c @@ -102,7 +102,7 @@ static const struct backend *drv_get_backend(int fd) return NULL; } -struct driver *drv_create(int fd) +struct driver *drv_create(int fd, uint64_t gpu_grp_type) { struct driver *drv; int ret; @@ -118,6 +118,7 @@ struct driver *drv_create(int fd) drv->fd = fd; drv->backend = drv_get_backend(fd); + drv->gpu_grp_type = gpu_grp_type; if (!drv->backend) goto free_driver; @@ -140,6 +141,14 @@ struct driver *drv_create(int fd) if (!drv->combos) goto free_mappings; + if (drv->backend->init) { + ret = drv->backend->init(drv); + if (ret) { + drv_array_destroy(drv->combos); + goto free_mappings; + } + } + return drv; free_mappings: @@ -155,20 +164,13 @@ struct driver *drv_create(int fd) return NULL; } -int drv_init(struct driver * drv, uint32_t grp_type) +int drv_set_gpu_grp_type(struct driver *drv, uint64_t type) { int ret = 0; assert(drv); assert(drv->backend); - drv->gpu_grp_type = grp_type; - if (drv->backend->init) { - ret = drv->backend->init(drv); - if (ret) { - drv_array_destroy(drv->combos); - drv_array_destroy(drv->mappings); - } - } + drv->gpu_grp_type = type; return ret; } @@ -834,38 +836,15 @@ uint32_t drv_get_max_texture_2d_size(struct driver *drv) return UINT32_MAX; } -bool drv_virtpci_with_blob(struct driver * drv) -{ - bool ret = false; - assert(drv); - assert(drv->backend); - - if (drv->backend->virtpci_with_blob) { - ret = drv->backend->virtpci_with_blob(drv); - } - return ret; -} - -bool drv_virtgpu_is_ivshm(struct driver * drv) -{ - bool ret = false; - assert(drv); - assert(drv->backend); - - if (drv->backend->virtgpu_is_ivshm) { - ret = 
drv->backend->virtgpu_is_ivshm(drv); - } - return ret; -} - -bool drv_is_dgpu(struct driver * drv) +bool drv_is_feature_supported(struct driver * drv, uint64_t feature) { bool ret = false; assert(drv); assert(drv->backend); - if (drv->backend->is_dgpu) { - ret = drv->backend->is_dgpu(drv); + if (drv->backend->is_feature_supported) { + ret = drv->backend->is_feature_supported(drv, feature); } return ret; } + diff --git a/drv.h b/drv.h index 3614e2e..d4d0ff2 100644 --- a/drv.h +++ b/drv.h @@ -164,9 +164,9 @@ struct mapping { void drv_preload(bool load); -struct driver *drv_create(int fd); +struct driver *drv_create(int fd, uint64_t gpu_grp_type); -int drv_init(struct driver * drv, uint32_t grp_type); +int drv_init(struct driver * drv); void drv_destroy(struct driver *drv); @@ -250,11 +250,9 @@ int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES], uint32_t drv_get_max_texture_2d_size(struct driver *drv); -bool drv_virtpci_with_blob(struct driver * drv); +int drv_set_gpu_grp_type(struct driver *drv, uint64_t type); -bool drv_virtgpu_is_ivshm(struct driver * drv); - -bool drv_is_dgpu(struct driver * drv); +bool drv_is_feature_supported(struct driver * drv, uint64_t feature); enum drv_log_level { DRV_LOGV, diff --git a/drv_priv.h b/drv_priv.h index 488bbc3..95279a0 100644 --- a/drv_priv.h +++ b/drv_priv.h @@ -60,8 +60,28 @@ struct combination { uint64_t use_flags; }; -#define GPU_TYPE_NORMAL 0 -#define GPU_TYPE_DUAL_IGPU_DGPU (1ull << 0) +enum { + GPU_GRP_TYPE_INTEL_IGPU_IDX = 0, + GPU_GRP_TYPE_INTEL_DGPU_IDX = 1, + GPU_GRP_TYPE_VIRTIO_GPU_BLOB_IDX = 2, + // virtio-GPU with allow-p2p feature, implying its display is backed by dGPU + GPU_GRP_TYPE_VIRTIO_GPU_BLOB_P2P_IDX = 3, + GPU_GRP_TYPE_VIRTIO_GPU_NO_BLOB_IDX = 4, + GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX = 5, + GPU_GRP_TYPE_NR, +}; + +#define GPU_GRP_TYPE_HAS_INTEL_IGPU_BIT (1ull << GPU_GRP_TYPE_INTEL_IGPU_IDX) +#define GPU_GRP_TYPE_HAS_INTEL_DGPU_BIT (1ull << GPU_GRP_TYPE_INTEL_DGPU_IDX) +#define 
GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_BIT (1ull << GPU_GRP_TYPE_VIRTIO_GPU_BLOB_IDX) +#define GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_P2P_BIT (1ull << GPU_GRP_TYPE_VIRTIO_GPU_BLOB_P2P_IDX) +#define GPU_GRP_TYPE_HAS_VIRTIO_GPU_NO_BLOB_BIT (1ull << GPU_GRP_TYPE_VIRTIO_GPU_NO_BLOB_IDX) +#define GPU_GRP_TYPE_HAS_VIRTIO_GPU_IVSHMEM_BIT (1ull << GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX) + +#define DRIVER_DEVICE_FEATURE_I915_DGPU (1ull << 1) +#define DRIVER_DEVICE_FEATURE_VIRGL_RESOURCE_BLOB (1ull << 2) +#define DRIVER_DEVICE_FEATURE_VIRGL_QUERY_DEV (1ull << 3) +#define DRIVER_DEVICE_FEATURE_VIRGL_ALLOW_P2P (1ull << 4) struct driver { int fd; @@ -106,9 +126,7 @@ struct backend { int (*resource_info)(struct bo *bo, uint32_t strides[DRV_MAX_PLANES], uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier); uint32_t (*get_max_texture_2d_size)(struct driver *drv); - bool (*virtpci_with_blob)(struct driver *drv); - bool (*virtgpu_is_ivshm)(struct driver *drv); - bool (*is_dgpu)(struct driver *drv); + bool (*is_feature_supported)(struct driver *drv, uint64_t feature); }; // clang-format off diff --git a/external/virtgpu_drm.h b/external/virtgpu_drm.h index 5a23b51..c11aa88 100644 --- a/external/virtgpu_drm.h +++ b/external/virtgpu_drm.h @@ -86,6 +86,7 @@ struct drm_virtgpu_execbuffer { #define VIRTGPU_PARAM_RESOURCE_SYNC 9 /* Synchronization resources */ #define VIRTGPU_PARAM_GUEST_VRAM 10 /* All guest allocations happen via virtgpu dedicated heap. */ #define VIRTGPU_PARAM_QUERY_DEV 11 /* Query the virtio device name. */ +#define VIRTGPU_PARAM_ALLOW_P2P 12 /* Supports P2P, implying its display is backed by dGPU. 
*/ #define VIRTGPU_PARAM_3D_FEATURES_BIT (1ull << VIRTGPU_PARAM_3D_FEATURES) #define VIRTGPU_PARAM_CAPSET_QUERY_FIX_BIT (1ull << VIRTGPU_PARAM_CAPSET_QUERY_FIX) @@ -98,6 +99,7 @@ struct drm_virtgpu_execbuffer { #define VIRTGPU_PARAM_RESOURCE_SYNC_BIT (1ull << VIRTGPU_PARAM_RESOURCE_SYNC) #define VIRTGPU_PARAM_GUEST_VRAM_BIT (1ull << VIRTGPU_PARAM_GUEST_VRAM) #define VIRTGPU_PARAM_QUERY_DEV_BIT (1ull << VIRTGPU_PARAM_QUERY_DEV) +#define VIRTGPU_PARAM_ALLOW_P2P_BIT (1ull << VIRTGPU_PARAM_ALLOW_P2P) struct drm_virtgpu_getparam { __u64 param; diff --git a/gbm.c b/gbm.c index 69ddb7b..e745870 100644 --- a/gbm.c +++ b/gbm.c @@ -56,18 +56,12 @@ PUBLIC struct gbm_device *gbm_create_device(int fd) if (!gbm) return NULL; - gbm->drv = drv_create(fd); + gbm->drv = drv_create(fd, 0); if (!gbm->drv) { free(gbm); return NULL; } - if (drv_init(gbm->drv, 0) != 0) { - drv_destroy(gbm->drv); - free(gbm); - return NULL; - } - return gbm; } diff --git a/i915.c b/i915.c index 4e7b83e..cdb7be2 100644 --- a/i915.c +++ b/i915.c @@ -4,6 +4,7 @@ * found in the LICENSE file. 
*/ +#include "drv.h" #ifdef DRV_I915 #include @@ -439,11 +440,12 @@ static int i915_add_combinations(struct driver *drv) drv_add_combinations(drv, linear_source_formats, ARRAY_SIZE(linear_source_formats), &metadata_x_tiled, texture_flags_video | BO_USE_CAMERA_MASK); - if (i915_has_tile4(i915)) { // in dual gpu case, only alloc x-tiling for dgpu for render - if ((drv->gpu_grp_type & GPU_TYPE_DUAL_IGPU_DGPU) && (GEN_VERSION_X10(i915) >= 125)) + if ((drv->gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_IGPU_BIT) || + (drv->gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_BIT)) { return 0; + } struct format_metadata metadata_4_tiled = { .tiling = I915_TILING_4, .priority = 3, @@ -473,8 +475,9 @@ static int i915_add_combinations(struct driver *drv) struct format_metadata metadata_y_tiled = { .tiling = I915_TILING_Y, .priority = 3, .modifier = I915_FORMAT_MOD_Y_TILED }; - if (drv->gpu_grp_type & GPU_TYPE_DUAL_IGPU_DGPU) { - scanout_and_render_not_linear = unset_flags(scanout_and_render, BO_USE_SCANOUT); + if ((drv->gpu_grp_type & GPU_GRP_TYPE_HAS_INTEL_DGPU_BIT) || + (drv->gpu_grp_type & GPU_GRP_TYPE_HAS_VIRTIO_GPU_BLOB_P2P_BIT)) { + return 0; } /* Support y-tiled NV12 and P010 for libva */ #ifdef I915_SCANOUT_Y_TILED @@ -1395,10 +1398,16 @@ static int i915_bo_flush(struct bo *bo, struct mapping *mapping) return 0; } -static bool i915_is_dgpu(struct driver *drv) +static bool i915_is_feature_supported(struct driver *drv, uint64_t feature) { struct i915_device *i915 = drv->priv; - return i915->has_local_mem; + switch (feature) { + case DRIVER_DEVICE_FEATURE_I915_DGPU: + return i915->has_local_mem; + default: + return false; + } + return false; } const struct backend backend_i915 = { @@ -1415,7 +1424,7 @@ const struct backend backend_i915 = { .bo_flush = i915_bo_flush, .resolve_format_and_use_flags = drv_resolve_format_and_use_flags_helper, .num_planes_from_modifier = i915_num_planes_from_modifier, - .is_dgpu = i915_is_dgpu, + .is_feature_supported = i915_is_feature_supported, }; 
#endif diff --git a/intel_device.c b/intel_device.c new file mode 100644 index 0000000..297878a --- /dev/null +++ b/intel_device.c @@ -0,0 +1,321 @@ +// Copyright (c) 2024 Intel Corporation +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "external/virtgpu_drm.h" + +#include "intel_device.h" + +#define ARRAY_SIZE(A) (sizeof(A) / sizeof(*(A))) + +#define GEN_VERSION_X10(dev) ((dev)->graphics_version * 10 + (dev)->sub_version) + +#define VIRTGPU_PARAM_QUERY_DEV 11 /* Query the virtio device name. 
*/ +#define VIRTGPU_PARAM_ALLOW_P2P 12 + +static int gem_param(int fd, int name) +{ + int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */ + + struct drm_i915_getparam gp = { .param = name, .value = &v }; + if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp)) + return -1; + + return v; +} + +struct intel_gpu_info { + int graphics_version; + int sub_version; + bool is_xelpd; +}; + +static int intel_gpu_info_from_device_id(uint16_t device_id, struct intel_gpu_info *i915) +{ + const uint16_t gen4_ids[] = { 0x29A2, 0x2992, 0x2982, 0x2972, 0x2A02, 0x2A12, 0x2A42, + 0x2E02, 0x2E12, 0x2E22, 0x2E32, 0x2E42, 0x2E92 }; + const uint16_t gen5_ids[] = { 0x0042, 0x0046 }; + const uint16_t gen6_ids[] = { 0x0102, 0x0112, 0x0122, 0x0106, 0x0116, 0x0126, 0x010A }; + const uint16_t gen7_ids[] = { + 0x0152, 0x0162, 0x0156, 0x0166, 0x015a, 0x016a, 0x0402, 0x0412, 0x0422, + 0x0406, 0x0416, 0x0426, 0x040A, 0x041A, 0x042A, 0x040B, 0x041B, 0x042B, + 0x040E, 0x041E, 0x042E, 0x0C02, 0x0C12, 0x0C22, 0x0C06, 0x0C16, 0x0C26, + 0x0C0A, 0x0C1A, 0x0C2A, 0x0C0B, 0x0C1B, 0x0C2B, 0x0C0E, 0x0C1E, 0x0C2E, + 0x0A02, 0x0A12, 0x0A22, 0x0A06, 0x0A16, 0x0A26, 0x0A0A, 0x0A1A, 0x0A2A, + 0x0A0B, 0x0A1B, 0x0A2B, 0x0A0E, 0x0A1E, 0x0A2E, 0x0D02, 0x0D12, 0x0D22, + 0x0D06, 0x0D16, 0x0D26, 0x0D0A, 0x0D1A, 0x0D2A, 0x0D0B, 0x0D1B, 0x0D2B, + 0x0D0E, 0x0D1E, 0x0D2E, 0x0F31, 0x0F32, 0x0F33, 0x0157, 0x0155 + }; + const uint16_t gen8_ids[] = { 0x22B0, 0x22B1, 0x22B2, 0x22B3, 0x1602, 0x1606, + 0x160A, 0x160B, 0x160D, 0x160E, 0x1612, 0x1616, + 0x161A, 0x161B, 0x161D, 0x161E, 0x1622, 0x1626, + 0x162A, 0x162B, 0x162D, 0x162E }; + const uint16_t gen9_ids[] = { + 0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916, 0x1917, + 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, + 0x192D, 0x1932, 0x193A, 0x193B, 0x193D, 0x0A84, 0x1A84, 0x1A85, 0x5A84, 0x5A85, + 0x3184, 0x3185, 0x5902, 0x5906, 0x590A, 0x5908, 0x590B, 0x590E, 0x5913, 0x5915, + 0x5917, 0x5912, 0x5916, 
0x591A, 0x591B, 0x591D, 0x591E, 0x5921, 0x5923, 0x5926, + 0x5927, 0x593B, 0x591C, 0x87C0, 0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, + 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, + 0x3EA8, 0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2, 0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, + 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0, 0x9BC2, 0x9BC4, 0x9BC5, + 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6 + }; + const uint16_t gen11_ids[] = { 0x8A50, 0x8A51, 0x8A52, 0x8A53, 0x8A54, 0x8A56, 0x8A57, + 0x8A58, 0x8A59, 0x8A5A, 0x8A5B, 0x8A5C, 0x8A5D, 0x8A71, + 0x4500, 0x4541, 0x4551, 0x4555, 0x4557, 0x4571, 0x4E51, + 0x4E55, 0x4E57, 0x4E61, 0x4E71 }; + const uint16_t gen12_ids[] = { + 0x4c8a, 0x4c8b, 0x4c8c, 0x4c90, 0x4c9a, 0x4680, 0x4681, 0x4682, 0x4683, 0x4688, + 0x4689, 0x4690, 0x4691, 0x4692, 0x4693, 0x4698, 0x4699, 0x4626, 0x4628, 0x462a, + 0x46a0, 0x46a1, 0x46a2, 0x46a3, 0x46a6, 0x46a8, 0x46aa, 0x46b0, 0x46b1, 0x46b2, + 0x46b3, 0x46c0, 0x46c1, 0x46c2, 0x46c3, 0x9A40, 0x9A49, 0x9A59, 0x9A60, 0x9A68, + 0x9A70, 0x9A78, 0x9AC0, 0x9AC9, 0x9AD9, 0x9AF8, 0x4905, 0x4906, 0x4907, 0x4908 + }; + const uint16_t adlp_ids[] = { 0x46A0, 0x46A1, 0x46A2, 0x46A3, 0x46A6, 0x46A8, 0x46AA, + 0x462A, 0x4626, 0x4628, 0x46B0, 0x46B1, 0x46B2, 0x46B3, + 0x46C0, 0x46C1, 0x46C2, 0x46C3, 0x46D0, 0x46D1, 0x46D2 }; + + const uint16_t dg2_ids[] = { // DG2 Val-Only Super-SKU: 4F80 - 4F87 + 0x4F80, 0x4F81, 0x4F82, 0x4F83, 0x4F84, 0x4F85, 0x4F86, 0x4F87, + + // DG2 Desktop Reserved: 56A0 to 56AF + 0x56A0, 0x56A1, 0x56A2, 0x56A3, 0x56A4, 0x56A5, 0x56A6, 0x56A7, + 0x56A8, 0x56A9, 0x56AA, 0x56AB, 0x56AC, 0x56AD, 0x56AE, 0x56AF, + + // DG2 Notebook Reserved: 5690 to 569F + 0x5690, 0x5691, 0x5692, 0x5693, 0x5694, 0x5695, 0x5696, 0x5697, + 0x5698, 0x5699, 0x569A, 0x569B, 0x569C, 0x569D, 0x569E, 0x569F, + + // Workstation Reserved: 56B0 to 56BF + 0x56B0, 0x56B1, 0x56B2, 0x56B3, 0x56B4, 0x56B5, 0x56B6, 0x56B7, + 0x56B8, 0x56B9, 0x56BA, 0x56BB, 0x56BC, 0x56BD, 0x56BE, 
0x56BF, + + // Server Reserved: 56C0 to 56CF + 0x56C0, 0x56C1, 0x56C2, 0x56C3, 0x56C4, 0x56C5, 0x56C6, 0x56C7, + 0x56C8, 0x56C9, 0x56CA, 0x56CB, 0x56CC, 0x56CD, 0x56CE, 0x56CF + }; + + const uint16_t rplp_ids[] = { 0xA720, 0xA721, 0xA7A0, 0xA7A1, 0xA7A8, 0xA7A9 }; + + const uint16_t mtl_ids[] = { 0x7D40, 0x7D60, 0x7D45, 0x7D55, 0x7DD5 }; + + unsigned i; + /* Gen 4 */ + for (i = 0; i < ARRAY_SIZE(gen4_ids); i++) + if (gen4_ids[i] == device_id) { + i915->graphics_version = 4; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + /* Gen 5 */ + for (i = 0; i < ARRAY_SIZE(gen5_ids); i++) + if (gen5_ids[i] == device_id) { + i915->graphics_version = 5; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + /* Gen 6 */ + for (i = 0; i < ARRAY_SIZE(gen6_ids); i++) + if (gen6_ids[i] == device_id) { + i915->graphics_version = 6; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + /* Gen 7 */ + for (i = 0; i < ARRAY_SIZE(gen7_ids); i++) + if (gen7_ids[i] == device_id) { + i915->graphics_version = 7; + i915->sub_version = 0; i915->is_xelpd = false; + return 0; + } + + /* Gen 8 */ + for (i = 0; i < ARRAY_SIZE(gen8_ids); i++) + if (gen8_ids[i] == device_id) { + i915->graphics_version = 8; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + /* Gen 9 */ + for (i = 0; i < ARRAY_SIZE(gen9_ids); i++) + if (gen9_ids[i] == device_id) { + i915->graphics_version = 9; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + /* Gen 11 */ + for (i = 0; i < ARRAY_SIZE(gen11_ids); i++) + if (gen11_ids[i] == device_id) { + i915->graphics_version = 11; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + /* Gen 12 */ + for (i = 0; i < ARRAY_SIZE(gen12_ids); i++) + if (gen12_ids[i] == device_id) { + i915->graphics_version = 12; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(dg2_ids); i++) + if (dg2_ids[i] == device_id) { + i915->graphics_version = 12; + i915->sub_version = 5; + i915->is_xelpd 
= false; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(adlp_ids); i++) + if (adlp_ids[i] == device_id) { + i915->graphics_version = 12; + i915->sub_version = 0; + i915->is_xelpd = true; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(rplp_ids); i++) + if (rplp_ids[i] == device_id) { + i915->graphics_version = 12; + i915->sub_version = 0; + i915->is_xelpd = true; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(mtl_ids); i++) + if (mtl_ids[i] == device_id) { + i915->graphics_version = 14; + i915->sub_version = 0; + i915->is_xelpd = false; + return 0; + } + + return -1; +} + +bool isIntelDg2(int fd) +{ + int ret; + uint16_t device_id; + struct intel_gpu_info info; + + ret = gem_param(fd, I915_PARAM_CHIPSET_ID); + if (ret == -1) { + return false; + } + device_id = (uint16_t)ret; + ret = intel_gpu_info_from_device_id(device_id, &info); + return ret == 0 && GEN_VERSION_X10(&info) == 125; +} + +bool isVirtioGpuAllowP2p(int virtgpu_fd) +{ + struct drm_virtgpu_getparam get_param = { 0, 0 }; + uint64_t value = 0; + get_param.param = VIRTGPU_PARAM_ALLOW_P2P; + get_param.value = (__u64)&value; + int ret = drmIoctl(virtgpu_fd, DRM_IOCTL_VIRTGPU_GETPARAM, &get_param); + if (ret || value != 1) { + return false; + } + return true; +} + +bool isVirtioGpuPciDevice(int virtgpu_fd) +{ + struct drm_virtgpu_getparam get_param = { 0, 0 }; + uint64_t value = 0; + get_param.param = VIRTGPU_PARAM_QUERY_DEV; + get_param.value = (__u64)&value; + int ret = drmIoctl(virtgpu_fd, DRM_IOCTL_VIRTGPU_GETPARAM, &get_param); + if (ret || value != 1) { + return false; + } + return true; +} + +bool isVirtioGpuWithBlob(int virtgpu_fd) +{ + struct drm_virtgpu_getparam get_param = { 0, 0 }; + uint64_t value = 0; + get_param.param = VIRTGPU_PARAM_RESOURCE_BLOB; + get_param.value = (__u64)&value; + int ret = drmIoctl(virtgpu_fd, DRM_IOCTL_VIRTGPU_GETPARAM, &get_param); + if (ret || value != 1) { + return false; + } + return true; +} + +int get_gpu_type(int fd) +{ + int type = -1; + drmVersionPtr version = 
drmGetVersion(fd); + if (version == NULL) { + return type; + } + if (strcmp(version->name, "i915") == 0) { + if (isIntelDg2(fd)) { + type = GPU_GRP_TYPE_INTEL_DGPU_IDX; + } else { + type = GPU_GRP_TYPE_INTEL_IGPU_IDX; + } + } else if (strcmp(version->name, "virtio_gpu") == 0) { + if (!isVirtioGpuPciDevice(fd)) { + type = GPU_GRP_TYPE_VIRTIO_GPU_IVSHMEM_IDX; + } else { + if (!isVirtioGpuWithBlob(fd)) { + type = GPU_GRP_TYPE_VIRTIO_GPU_NO_BLOB_IDX; + } else { + if (isVirtioGpuAllowP2p(fd)) { + type = GPU_GRP_TYPE_VIRTIO_GPU_BLOB_P2P_IDX; + } else { + type = GPU_GRP_TYPE_VIRTIO_GPU_BLOB_IDX; + } + } + } + } + drmFreeVersion(version); + return type; +} + diff --git a/intel_device.h b/intel_device.h new file mode 100644 index 0000000..d6ac2b5 --- /dev/null +++ b/intel_device.h @@ -0,0 +1,44 @@ +// Copyright (c) 2024 Intel Corporation +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +#pragma once + +#ifndef INTEL_DEVICE_ +#define INTEL_DEVICE_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +bool isIntelDg2(int fd); +bool isVirtioGpuAllowP2p(int virtgpu_fd); +bool isVirtioGpuPciDevice(int virtgpu_fd); +bool isVirtioGpuWithBlob(int virtgpu_fd); + +int get_gpu_type(int fd); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/virtgpu.h b/virtgpu.h index 19ef43c..f6b9823 100644 --- a/virtgpu.h +++ b/virtgpu.h @@ -29,7 +29,7 @@ static struct virtgpu_param params[] = { PARAM(VIRTGPU_PARAM_CROSS_DEVICE), PARAM(VIRTGPU_PARAM_CONTEXT_INIT), PARAM(VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs), PARAM(VIRTGPU_PARAM_CREATE_GUEST_HANDLE), PARAM(VIRTGPU_PARAM_RESOURCE_SYNC), PARAM(VIRTGPU_PARAM_GUEST_VRAM), - PARAM(VIRTGPU_PARAM_QUERY_DEV), + PARAM(VIRTGPU_PARAM_QUERY_DEV), PARAM(VIRTGPU_PARAM_ALLOW_P2P), }; enum virtgpu_param_id { diff --git a/virtgpu_virgl.c b/virtgpu_virgl.c index 0509d69..ed6890e 100644 --- a/virtgpu_virgl.c +++ b/virtgpu_virgl.c @@ -614,6 +614,9 @@ static int virgl_init(struct driver *drv) if ((strcmp(params[i].name, "VIRTGPU_PARAM_RESOURCE_BLOB") == 0) && (params[i].value == 1)) { prv->dev_feature |= VIRTGPU_PARAM_RESOURCE_BLOB_BIT; } + if ((strcmp(params[i].name, "VIRTGPU_PARAM_ALLOW_P2P") == 0) && (params[i].value == 1)) { + prv->dev_feature |= VIRTGPU_PARAM_ALLOW_P2P_BIT; + } } } @@ -1178,14 +1181,22 @@ static uint32_t virgl_get_max_texture_2d_size(struct driver *drv) return VIRGL_2D_MAX_TEXTURE_2D_SIZE; } -static bool virgl_virtpci_with_blob(struct driver *drv) { +static bool virgl_is_feature_supported(struct driver *drv, uint64_t feature) { struct virgl_priv *prv = (struct virgl_priv *)drv->priv; - return ((prv->dev_feature & VIRTGPU_PARAM_QUERY_DEV_BIT ) && (prv->dev_feature & VIRTGPU_PARAM_RESOURCE_BLOB_BIT)); -} - -static bool virgl_drv_virtgpu_is_ivshm(struct driver *drv) { - struct virgl_priv *prv = (struct virgl_priv *)drv->priv; - return (!(prv->dev_feature & VIRTGPU_PARAM_QUERY_DEV_BIT) && 
(prv->dev_feature & VIRTGPU_PARAM_RESOURCE_BLOB_BIT)); + switch (feature) { + case DRIVER_DEVICE_FEATURE_VIRGL_RESOURCE_BLOB: + feature = VIRTGPU_PARAM_RESOURCE_BLOB_BIT; + break; + case DRIVER_DEVICE_FEATURE_VIRGL_QUERY_DEV: + feature = VIRTGPU_PARAM_QUERY_DEV_BIT; + break; + case DRIVER_DEVICE_FEATURE_VIRGL_ALLOW_P2P: + feature = VIRTGPU_PARAM_ALLOW_P2P_BIT; + break; + default: + return false; + } + return !!(prv->dev_feature & feature); } const struct backend virtgpu_virgl = { .name = "virtgpu_virgl", @@ -1203,5 +1214,4 @@ const struct backend virtgpu_virgl = { .name = "virtgpu_virgl", virgl_resolve_format_and_use_flags, .resource_info = virgl_resource_info, .get_max_texture_2d_size = virgl_get_max_texture_2d_size, - .virtpci_with_blob = virgl_virtpci_with_blob, - .virtgpu_is_ivshm = virgl_drv_virtgpu_is_ivshm }; + .is_feature_supported = virgl_is_feature_supported, };