commit c575b7eeb8
Add support for the Xen para-virtualized frontend display driver.

The accompanying backend [1] is implemented as a user-space application
and its helper library [2], capable of running as a Weston client or DRM
master. Configuration of both backend and frontend is done via Xen guest
domain configuration options [3].

Driver limitations:
 1. Only the primary plane, without additional properties, is supported.
 2. Only one video mode is supported, whose resolution is configured via
    XenStore.
 3. All CRTCs operate at a fixed frequency of 60 Hz.

1. Implement the Xen bus state machine for the frontend driver according
to the state diagram and recovery flow of the display para-virtualized
protocol: xen/interface/io/displif.h.

2. Read configuration values from Xen store according to the
xen/interface/io/displif.h protocol:
 - read connector(s) configuration
 - read buffer allocation mode (backend/frontend)

3. Handle Xen event channels:
 - create one for each configured connector and publish the
   corresponding ring references and event channels in Xen store, so the
   backend can connect
 - implement event channel interrupt handlers
 - create and destroy event channels with respect to the Xen bus state

4. Implement shared buffer handling according to the para-virtualized
display device protocol at xen/interface/io/displif.h:
 - handle page directories according to the displif protocol:
   - allocate and share page directories
   - grant references to the required set of pages for the page
     directory
 - allocate Xen ballooned pages via the Xen balloon driver with
   alloc_xenballooned_pages/free_xenballooned_pages
 - grant references to the required set of pages for the shared buffer
   itself
 - implement page map/unmap for the buffers allocated by the backend
   (gnttab_map_refs/gnttab_unmap_refs)

5. Implement kernel modesetting/connector handling using the DRM simple
KMS helper pipeline:
 - implement the KMS part of the driver with the help of the DRM simple
   pipeline helper, which is possible because the para-virtualized
   driver only supports a single (primary) plane:
   - initialize connectors according to the XenStore configuration
   - handle frame done events from the backend
   - create and destroy frame buffers and propagate those to the backend
   - propagate set/reset mode configuration to the backend on display
     enable/disable callbacks
   - send page flip requests to the backend and implement logic for
     reporting backend IO errors on the prepare fb callback
 - implement virtual connector handling:
   - support only pixel formats suitable for single plane modes
   - make sure the connector is always connected
   - support a single video mode as per the para-virtualized driver
     configuration

6. Implement GEM handling depending on the driver mode of operation:
depending on the requirements for the para-virtualized environment,
namely requirements dictated by the accompanying DRM/(v)GPU drivers
running in both host and guest environments, a number of operating modes
of the para-virtualized display driver are supported:
 - display buffers can be allocated by either the frontend driver or the
   backend
 - display buffers can be allocated to be contiguous in memory or not

Note! The frontend driver itself has no dependency on contiguous memory
for its operation.

6.1. Buffers allocated by the frontend driver.

The modes of operation below are configured at compile-time via the
frontend driver's kernel configuration.

6.1.1. Frontend driver configured to use GEM CMA helpers
This use-case is useful when used with an accompanying DRM/vGPU driver
in the guest domain which was designed to only work with contiguous
buffers, e.g. a DRM driver based on GEM CMA helpers: such drivers can
only import contiguous PRIME buffers, thus requiring the frontend driver
to provide such buffers. In order to implement this mode of operation
the para-virtualized frontend driver can be configured to use GEM CMA
helpers.

6.1.2. Frontend driver doesn't use GEM CMA
If the accompanying drivers can cope with non-contiguous memory then, to
lower pressure on the CMA subsystem of the kernel, the driver can
allocate buffers from system memory.

Note! If used with accompanying DRM/(v)GPU drivers this mode of
operation may require IOMMU support on the platform, so the accompanying
DRM/vGPU hardware can still reach display buffer memory while importing
PRIME buffers from the frontend driver.

6.2. Buffers allocated by the backend

This mode of operation is run-time configured via the guest domain
configuration through XenStore entries.

For systems which do not provide IOMMU support but have specific
requirements for display buffers, it is possible to allocate such
buffers at the backend side and share those with the frontend. For
example, if the host domain is 1:1 mapped and has DRM/GPU hardware
expecting physically contiguous memory, this allows implementing
zero-copy use-cases.

Note: while using this scenario the following should be considered:
 a) if the guest domain dies then pages/grants received from the backend
    cannot be claimed back
 b) a misbehaving guest may send too many requests to the backend,
    exhausting its grant references and memory (consider this from a
    security POV)

Note! Configuration options 6.1.1 (contiguous display buffers) and 6.2
(backend allocated buffers) are not supported at the same time.

7. Handle communication with the backend:
 - send requests and wait for the responses according to the displif
   protocol
 - serialize access to the communication channel
 - the time-out used for backend communication is set to 3000 ms
 - manage display buffers shared with the backend

[1] https://github.com/xen-troops/displ_be
[2] https://github.com/xen-troops/libxenbe
[3] https://xenbits.xen.org/gitweb/?p=xen.git;a=blob;f=docs/man/xl.cfg.pod.5.in;h=a699367779e2ae1212ff8f638eff0206ec1a1cc9;hb=refs/heads/master#l1257

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180403112317.28751-2-andr2000@gmail.com
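For illustration, a guest domain configuration fragment enabling the
para-virtualized display might look as follows (vdispl syntax as
documented in xl.cfg [3]; the backend domain name, be-alloc value and
connector list here are hypothetical):

    vdispl = [ 'backend=DomD, be-alloc=0, connectors=id0:1920x1080;id1:800x600' ]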
xen_drm_front_evtchnl.c · 388 lines · 9.1 KiB · C
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>

#include <linux/errno.h>
#include <linux/irq.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"

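/*
 * Interrupt handler for the control channel: consume responses queued by
 * the backend on the shared request ring and complete the request that
 * is waiting on u.req.completion with the reported status.
 */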
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
        struct xen_drm_front_evtchnl *evtchnl = dev_id;
        struct xen_drm_front_info *front_info = evtchnl->front_info;
        struct xendispl_resp *resp;
        RING_IDX i, rp;
        unsigned long flags;

        if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;

        spin_lock_irqsave(&front_info->io_lock, flags);

again:
        rp = evtchnl->u.req.ring.sring->rsp_prod;
        /* ensure we see queued responses up to rp */
        virt_rmb();

        for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
                resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
                if (unlikely(resp->id != evtchnl->evt_id))
                        continue;

                switch (resp->operation) {
                case XENDISPL_OP_PG_FLIP:
                case XENDISPL_OP_FB_ATTACH:
                case XENDISPL_OP_FB_DETACH:
                case XENDISPL_OP_DBUF_CREATE:
                case XENDISPL_OP_DBUF_DESTROY:
                case XENDISPL_OP_SET_CONFIG:
                        evtchnl->u.req.resp_status = resp->status;
                        complete(&evtchnl->u.req.completion);
                        break;

                default:
                        DRM_ERROR("Operation %d is not supported\n",
                                  resp->operation);
                        break;
                }
        }

        evtchnl->u.req.ring.rsp_cons = i;

        if (i != evtchnl->u.req.ring.req_prod_pvt) {
                int more_to_do;

                RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
                                               more_to_do);
                if (more_to_do)
                        goto again;
        } else {
                evtchnl->u.req.ring.sring->rsp_event = i + 1;
        }

        spin_unlock_irqrestore(&front_info->io_lock, flags);
        return IRQ_HANDLED;
}

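/*
 * Interrupt handler for the event channel: consume incoming events
 * published by the backend on the event page and forward page flip
 * completions to the DRM part of the driver as frame done notifications.
 */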
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
        struct xen_drm_front_evtchnl *evtchnl = dev_id;
        struct xen_drm_front_info *front_info = evtchnl->front_info;
        struct xendispl_event_page *page = evtchnl->u.evt.page;
        u32 cons, prod;
        unsigned long flags;

        if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;

        spin_lock_irqsave(&front_info->io_lock, flags);

        prod = page->in_prod;
        /* ensure we see ring contents up to prod */
        virt_rmb();
        if (prod == page->in_cons)
                goto out;

        for (cons = page->in_cons; cons != prod; cons++) {
                struct xendispl_evt *event;

                event = &XENDISPL_IN_RING_REF(page, cons);
                if (unlikely(event->id != evtchnl->evt_id++))
                        continue;

                switch (event->type) {
                case XENDISPL_EVT_PG_FLIP:
                        xen_drm_front_on_frame_done(front_info, evtchnl->index,
                                                    event->op.pg_flip.fb_cookie);
                        break;
                }
        }
        page->in_cons = cons;
        /* ensure ring contents */
        virt_wmb();

out:
        spin_unlock_irqrestore(&front_info->io_lock, flags);
        return IRQ_HANDLED;
}

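/*
 * Tear down a single channel: mark it disconnected, fail all pending
 * requests with -EIO, unbind the interrupt handler, free the Xen event
 * channel and end foreign access to the shared page.
 */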
static void evtchnl_free(struct xen_drm_front_info *front_info,
                         struct xen_drm_front_evtchnl *evtchnl)
{
        unsigned long page = 0;

        if (evtchnl->type == EVTCHNL_TYPE_REQ)
                page = (unsigned long)evtchnl->u.req.ring.sring;
        else if (evtchnl->type == EVTCHNL_TYPE_EVT)
                page = (unsigned long)evtchnl->u.evt.page;
        if (!page)
                return;

        evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

        if (evtchnl->type == EVTCHNL_TYPE_REQ) {
                /* release all who still wait for a response, if any */
                evtchnl->u.req.resp_status = -EIO;
                complete_all(&evtchnl->u.req.completion);
        }

        if (evtchnl->irq)
                unbind_from_irqhandler(evtchnl->irq, evtchnl);

        if (evtchnl->port)
                xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

        /* end access and free the page */
        if (evtchnl->gref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(evtchnl->gref, 0, page);

        memset(evtchnl, 0, sizeof(*evtchnl));
}

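/*
 * Allocate and initialize a single channel: share a zeroed page with the
 * backend (as a request shared ring for EVTCHNL_TYPE_REQ or as an event
 * page for EVTCHNL_TYPE_EVT), allocate a Xen event channel and bind the
 * matching interrupt handler to it.
 */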
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
                         struct xen_drm_front_evtchnl *evtchnl,
                         enum xen_drm_front_evtchnl_type type)
{
        struct xenbus_device *xb_dev = front_info->xb_dev;
        unsigned long page;
        grant_ref_t gref;
        irq_handler_t handler;
        int ret;

        memset(evtchnl, 0, sizeof(*evtchnl));
        evtchnl->type = type;
        evtchnl->index = index;
        evtchnl->front_info = front_info;
        evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
        evtchnl->gref = GRANT_INVALID_REF;

        page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
        if (!page) {
                ret = -ENOMEM;
                goto fail;
        }

        if (type == EVTCHNL_TYPE_REQ) {
                struct xen_displif_sring *sring;

                init_completion(&evtchnl->u.req.completion);
                mutex_init(&evtchnl->u.req.req_io_lock);
                sring = (struct xen_displif_sring *)page;
                SHARED_RING_INIT(sring);
                FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

                ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
                if (ret < 0) {
                        evtchnl->u.req.ring.sring = NULL;
                        free_page(page);
                        goto fail;
                }

                handler = evtchnl_interrupt_ctrl;
        } else {
                ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
                                                  virt_to_gfn((void *)page), 0);
                if (ret < 0) {
                        free_page(page);
                        goto fail;
                }

                evtchnl->u.evt.page = (struct xendispl_event_page *)page;
                gref = ret;
                handler = evtchnl_interrupt_evt;
        }
        evtchnl->gref = gref;

        ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
        if (ret < 0)
                goto fail;

        ret = bind_evtchn_to_irqhandler(evtchnl->port,
                                        handler, 0, xb_dev->devicetype,
                                        evtchnl);
        if (ret < 0)
                goto fail;

        evtchnl->irq = ret;
        return 0;

fail:
        DRM_ERROR("Failed to allocate ring: %d\n", ret);
        return ret;
}

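/*
 * Create a request/event channel pair for every configured connector.
 * On any failure all channels allocated so far are freed.
 */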
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
        struct xen_drm_front_cfg *cfg;
        int ret, conn;

        cfg = &front_info->cfg;

        front_info->evt_pairs =
                        kcalloc(cfg->num_connectors,
                                sizeof(struct xen_drm_front_evtchnl_pair),
                                GFP_KERNEL);
        if (!front_info->evt_pairs) {
                ret = -ENOMEM;
                goto fail;
        }

        for (conn = 0; conn < cfg->num_connectors; conn++) {
                ret = evtchnl_alloc(front_info, conn,
                                    &front_info->evt_pairs[conn].req,
                                    EVTCHNL_TYPE_REQ);
                if (ret < 0) {
                        DRM_ERROR("Error allocating control channel\n");
                        goto fail;
                }

                ret = evtchnl_alloc(front_info, conn,
                                    &front_info->evt_pairs[conn].evt,
                                    EVTCHNL_TYPE_EVT);
                if (ret < 0) {
                        DRM_ERROR("Error allocating in-event channel\n");
                        goto fail;
                }
        }
        front_info->num_evt_pairs = cfg->num_connectors;
        return 0;

fail:
        xen_drm_front_evtchnl_free_all(front_info);
        return ret;
}

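/*
 * Publish the ring reference and event channel number of a single
 * channel in Xen store under the given connector path, so the backend
 * can connect.
 */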
static int evtchnl_publish(struct xenbus_transaction xbt,
                           struct xen_drm_front_evtchnl *evtchnl,
                           const char *path, const char *node_ring,
                           const char *node_chnl)
{
        struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
        int ret;

        /* write channel ring reference */
        ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
        if (ret < 0) {
                xenbus_dev_error(xb_dev, ret, "writing ring-ref");
                return ret;
        }

        /* write event channel number */
        ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
        if (ret < 0) {
                xenbus_dev_error(xb_dev, ret, "writing event channel");
                return ret;
        }

        return 0;
}

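/*
 * Publish ring references and event channels of all connectors in a
 * single Xen store transaction, restarting the transaction if it ends
 * with -EAGAIN.
 */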
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
        struct xenbus_transaction xbt;
        struct xen_drm_front_cfg *plat_data;
        int ret, conn;

        plat_data = &front_info->cfg;

again:
        ret = xenbus_transaction_start(&xbt);
        if (ret < 0) {
                xenbus_dev_fatal(front_info->xb_dev, ret,
                                 "starting transaction");
                return ret;
        }

        for (conn = 0; conn < plat_data->num_connectors; conn++) {
                ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
                                      plat_data->connectors[conn].xenstore_path,
                                      XENDISPL_FIELD_REQ_RING_REF,
                                      XENDISPL_FIELD_REQ_CHANNEL);
                if (ret < 0)
                        goto fail;

                ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
                                      plat_data->connectors[conn].xenstore_path,
                                      XENDISPL_FIELD_EVT_RING_REF,
                                      XENDISPL_FIELD_EVT_CHANNEL);
                if (ret < 0)
                        goto fail;
        }

        ret = xenbus_transaction_end(xbt, 0);
        if (ret < 0) {
                if (ret == -EAGAIN)
                        goto again;

                xenbus_dev_fatal(front_info->xb_dev, ret,
                                 "completing transaction");
                goto fail_to_end;
        }

        return 0;

fail:
        xenbus_transaction_end(xbt, 1);

fail_to_end:
        xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
        return ret;
}

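/*
 * Push the privately produced requests to the shared ring and kick the
 * backend if it expects a notification.
 */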
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
        int notify;

        evtchnl->u.req.ring.req_prod_pvt++;
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
        if (notify)
                notify_remote_via_irq(evtchnl->irq);
}

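/*
 * Set the state of all request and event channels under io_lock, e.g.
 * when the Xen bus connection state changes.
 */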
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
                                     enum xen_drm_front_evtchnl_state state)
{
        unsigned long flags;
        int i;

        if (!front_info->evt_pairs)
                return;

        spin_lock_irqsave(&front_info->io_lock, flags);
        for (i = 0; i < front_info->num_evt_pairs; i++) {
                front_info->evt_pairs[i].req.state = state;
                front_info->evt_pairs[i].evt.state = state;
        }
        spin_unlock_irqrestore(&front_info->io_lock, flags);
}

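/*
 * Free all channel pairs together with the array that holds them.
 */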
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
        int i;

        if (!front_info->evt_pairs)
                return;

        for (i = 0; i < front_info->num_evt_pairs; i++) {
                evtchnl_free(front_info, &front_info->evt_pairs[i].req);
                evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
        }

        kfree(front_info->evt_pairs);
        front_info->evt_pairs = NULL;
}