Merge remote branch 'origin/drm-core-next' into test
Conflicts: drivers/gpu/drm/drm_fb_helper.c
Commit: f846761853
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \

 drm-$(CONFIG_COMPAT) += drm_ioc32.o

-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o

 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o

@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
		 drm_tv_subconnector_enum_list)

+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+	{ DRM_MODE_DIRTY_OFF,      "Off"      },
+	{ DRM_MODE_DIRTY_ON,       "On"       },
+	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+		 drm_dirty_info_enum_list)
+
 struct drm_conn_prop_enum_list {
	int type;
	char *name;
@@ -801,6 +810,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dithering_property);

+/**
+ * drm_mode_create_dirty_property - create dirty property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+	struct drm_property *dirty_info;
+	int i;
+
+	if (dev->mode_config.dirty_info_property)
+		return 0;
+
+	dirty_info =
+		drm_property_create(dev, DRM_MODE_PROP_ENUM |
+				    DRM_MODE_PROP_IMMUTABLE,
+				    "dirty",
+				    ARRAY_SIZE(drm_dirty_info_enum_list));
+	for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+		drm_property_add_enum(dirty_info, i,
+				      drm_dirty_info_enum_list[i].type,
+				      drm_dirty_info_enum_list[i].name);
+	dev->mode_config.dirty_info_property = dirty_info;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
 /**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
@@ -1753,6 +1792,71 @@ out:
	return ret;
 }

+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret = 0;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj) {
+		DRM_ERROR("invalid framebuffer id\n");
+		ret = -EINVAL;
+		goto out_err1;
+	}
+	fb = obj_to_fb(obj);
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret)
+			goto out_err2;
+	}
+
+	if (fb->funcs->dirty) {
+		ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+	} else {
+		ret = -ENOSYS;
+		goto out_err2;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
 /**
  * drm_fb_release - remove and free the FBs on this file
  * @filp: file * from the ioctl
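For context, a minimal userspace sketch of how a client might drive this new dirty-FB path. DRM_IOCTL_MODE_DIRTYFB and struct drm_mode_fb_dirty_cmd are the UAPI added alongside this ioctl; the DRM fd, fb_id and clip rectangles are hypothetical placeholders and error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Flag n damaged rectangles on framebuffer fb_id so the driver can flush them. */
static int flush_damage(int drm_fd, uint32_t fb_id, struct drm_clip_rect *rects, uint32_t n)
{
	struct drm_mode_fb_dirty_cmd cmd = {
		.fb_id     = fb_id,
		.flags     = 0,	/* no DRM_MODE_FB_DIRTY_ANNOTATE_* flags */
		.num_clips = n,
		.clips_ptr = (uint64_t)(unsigned long)rects,
	};

	return ioctl(drm_fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}

On the kernel side this lands in drm_mode_dirtyfb_ioctl() above and is forwarded to the driver's fb->funcs->dirty hook if one is provided.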
@@ -2478,3 +2582,72 @@ out:
	mutex_unlock(&dev->mode_config.mutex);
	return ret;
 }
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_page_flip *page_flip = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	struct drm_pending_vblank_event *e = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+	    page_flip->reserved != 0)
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj)
+		goto out;
+	crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->page_flip == NULL)
+		goto out;
+
+	obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj)
+		goto out;
+	fb = obj_to_fb(obj);
+
+	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		ret = -ENOMEM;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (file_priv->event_space < sizeof e->event) {
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+		file_priv->event_space -= sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		e = kzalloc(sizeof *e, GFP_KERNEL);
+		if (e == NULL) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+
+		e->event.base.type = DRM_EVENT_VBLANK;
+		e->event.base.length = sizeof e->event;
+		e->event.user_data = page_flip->user_data;
+		e->base.event = &e->event.base;
+		e->base.file_priv = file_priv;
+		e->base.destroy =
+			(void (*) (struct drm_pending_event *)) kfree;
+	}
+
+	ret = crtc->funcs->page_flip(crtc, fb, e);
+	if (ret) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		file_priv->event_space += sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+	}
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
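A rough userspace sketch of this new page-flip path, assuming an already-configured CRTC and a spare framebuffer; crtc_id, fb_id and the open DRM fd are placeholders and error handling is omitted. Passing DRM_MODE_PAGE_FLIP_EVENT asks the kernel to queue a vblank event that the client later drains through read() on the DRM fd:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Queue a flip to fb_id on crtc_id and ask for a completion event. */
static int queue_flip(int drm_fd, uint32_t crtc_id, uint32_t fb_id, void *user_data)
{
	struct drm_mode_crtc_page_flip flip = {
		.crtc_id   = crtc_id,
		.fb_id     = fb_id,
		.flags     = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = (uint64_t)(unsigned long)user_data,
	};

	return ioctl(drm_fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}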
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,

	count = (*connector_funcs->get_modes)(connector);
	if (!count) {
-		count = drm_add_modes_noedid(connector, 800, 600);
+		count = drm_add_modes_noedid(connector, 1024, 768);
		if (!count)
			return 0;
	}
@@ -28,84 +28,20 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
 #include "drmP.h"

 /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-
-#define MODE_I2C_START	1
-#define MODE_I2C_WRITE	2
-#define MODE_I2C_READ	4
-#define MODE_I2C_STOP	8
-
 static int
 i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
			    uint8_t write_byte, uint8_t *read_byte)
 {
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-	uint16_t address = algo_data->address;
-	uint8_t msg[5];
-	uint8_t reply[2];
-	int msg_bytes;
-	int reply_bytes;
	int ret;

-	/* Set up the command byte */
-	if (mode & MODE_I2C_READ)
-		msg[0] = AUX_I2C_READ << 4;
-	else
-		msg[0] = AUX_I2C_WRITE << 4;
-
-	if (!(mode & MODE_I2C_STOP))
-		msg[0] |= AUX_I2C_MOT << 4;
-
-	msg[1] = address >> 8;
-	msg[2] = address;
-
-	switch (mode) {
-	case MODE_I2C_WRITE:
-		msg[3] = 0;
-		msg[4] = write_byte;
-		msg_bytes = 5;
-		reply_bytes = 1;
-		break;
-	case MODE_I2C_READ:
-		msg[3] = 0;
-		msg_bytes = 4;
-		reply_bytes = 2;
-		break;
-	default:
-		msg_bytes = 3;
-		reply_bytes = 1;
-		break;
-	}
-
-	for (;;) {
-		ret = (*algo_data->aux_ch)(adapter,
-					   msg, msg_bytes,
-					   reply, reply_bytes);
-		if (ret < 0) {
-			DRM_DEBUG("aux_ch failed %d\n", ret);
-			return ret;
-		}
-		switch (reply[0] & AUX_I2C_REPLY_MASK) {
-		case AUX_I2C_REPLY_ACK:
-			if (mode == MODE_I2C_READ) {
-				*read_byte = reply[1];
-			}
-			return reply_bytes - 1;
-		case AUX_I2C_REPLY_NACK:
-			DRM_DEBUG("aux_ch nack\n");
-			return -EREMOTEIO;
-		case AUX_I2C_REPLY_DEFER:
-			DRM_DEBUG("aux_ch defer\n");
-			udelay(100);
-			break;
-		default:
-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
-			return -EREMOTEIO;
-		}
-	}
+	ret = (*algo_data->aux_ch)(adapter, mode,
+				   write_byte, read_byte);
+	return ret;
 }

 /*
@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
 };

 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -365,6 +367,29 @@ static void __exit drm_core_exit(void)
 module_init(drm_core_init);
 module_exit(drm_core_exit);

+/**
+ * Copy and IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+	int len;
+
+	/* don't overflow userbuf */
+	len = strlen(value);
+	if (len > *buf_len)
+		len = *buf_len;
+
+	/* let userspace know exact length of driver value (which could be
+	 * larger than the userspace-supplied buffer) */
+	*buf_len = strlen(value);
+
+	/* finally, try filling in the userbuf */
+	if (len && buf)
+		if (copy_to_user(buf, value, len))
+			return -EFAULT;
+	return 0;
+}
+
 /**
  * Get version information
  *
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
 {
	struct drm_version *version = data;
-	int len;
+	int err;

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;
-	DRM_COPY(version->name, dev->driver->name);
-	DRM_COPY(version->date, dev->driver->date);
-	DRM_COPY(version->desc, dev->driver->desc);
+	err = drm_copy_field(version->name, &version->name_len,
+			dev->driver->name);
+	if (!err)
+		err = drm_copy_field(version->date, &version->date_len,
+				dev->driver->date);
+	if (!err)
+		err = drm_copy_field(version->desc, &version->desc_len,
+				dev->driver->desc);

-	return 0;
+	return err;
 }

 /**
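The drm_copy_field() contract is that each *_len field is updated to the real string length even when the user buffer is too small, so callers typically issue DRM_IOCTL_VERSION twice: once with NULL buffers to learn the lengths, then again with allocated buffers. A hedged userspace sketch of that pattern (libdrm's drmGetVersion() does essentially this; error handling omitted):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static void get_driver_name(int drm_fd)
{
	struct drm_version v = {0};

	ioctl(drm_fd, DRM_IOCTL_VERSION, &v);	/* first pass: lengths only */
	v.name = malloc(v.name_len + 1);
	v.date = malloc(v.date_len + 1);
	v.desc = malloc(v.desc_len + 1);
	ioctl(drm_fd, DRM_IOCTL_VERSION, &v);	/* second pass: fill buffers */
	v.name[v.name_len] = '\0';
	/* ... use v.name, then free the three buffers ... */
}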
@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
  */
 static bool edid_is_valid(struct edid *edid)
 {
-	int i;
+	int i, score = 0;
	u8 csum = 0;
	u8 *raw_edid = (u8 *)edid;

-	if (memcmp(edid->header, edid_header, sizeof(edid_header)))
+	for (i = 0; i < sizeof(edid_header); i++)
+		if (raw_edid[i] == edid_header[i])
+			score++;
+
+	if (score == 8) ;
+	else if (score >= 6) {
+		DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+		memcpy(raw_edid, edid_header, sizeof(edid_header));
+	} else
		goto bad;
-	if (edid->version != 1) {
-		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
-		goto bad;
-	}
-	if (edid->revision > 4)
-		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");

	for (i = 0; i < EDID_LENGTH; i++)
		csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
		goto bad;
	}

+	if (edid->version != 1) {
+		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+		goto bad;
+	}
+
+	if (edid->revision > 4)
+		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
	return 1;

 bad:
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
+static const int drm_num_dmt_modes =
+	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);

 static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
			int hsize, int vsize, int fresh)
 {
-	int i, count;
+	int i;
	struct drm_display_mode *ptr, *mode;

-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
	mode = NULL;
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < drm_num_dmt_modes; i++) {
		ptr = &drm_dmt_modes[i];
		if (hsize == ptr->hdisplay &&
			vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
	return modes;
 }

+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+	struct detailed_data_monitor_range *range;
+	int hsync, vrefresh;
+
+	range = &timing->data.other_data.data.range;
+
+	hsync = drm_mode_hsync(mode);
+	vrefresh = drm_mode_vrefresh(mode);
+
+	if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+		return false;
+
+	if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+		return false;
+
+	if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+		/* be forgiving since it's in units of 10MHz */
+		int max_clock = range->pixel_clock_mhz * 10 + 9;
+		max_clock *= 1000;
+		if (mode->clock > max_clock)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+				   struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < drm_num_dmt_modes; i++) {
+		if (mode_in_range(drm_dmt_modes + i, timing)) {
+			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+			 struct detailed_timing *timing)
+{
+	int i, j, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	struct cvt_timing *cvt;
+	const int rates[] = { 60, 85, 75, 60, 50 };
+
+	for (i = 0; i < 4; i++) {
+		int width, height;
+		cvt = &(timing->data.other_data.data.cvt[i]);
+
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
+		switch (cvt->code[1] & 0xc0) {
+		case 0x00:
+			width = height * 4 / 3;
+			break;
+		case 0x40:
+			width = height * 16 / 9;
+			break;
+		case 0x80:
+			width = height * 16 / 10;
+			break;
+		case 0xc0:
+			width = height * 15 / 9;
+			break;
+		}
+
+		for (j = 1; j < 5; j++) {
+			if (cvt->code[2] & (1 << j)) {
+				newmode = drm_cvt_mode(dev, width, height,
+						       rates[j], j == 0,
+						       false, false);
+				if (newmode) {
+					drm_mode_probed_add(connector, newmode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+			      struct detailed_timing *timing,
+			      struct edid *edid, u32 quirks, int preferred)
+{
+	int i, modes = 0;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+	int timing_level = standard_timing_level(edid);
+	int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	if (timing->pixel_clock) {
+		newmode = drm_mode_detailed(dev, edid, timing, quirks);
+		if (!newmode)
+			return 0;
+
+		if (preferred)
+			newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_probed_add(connector, newmode);
+		return 1;
+	}
+
+	/* other timing types */
+	switch (data->type) {
+	case EDID_DETAIL_MONITOR_RANGE:
+		if (gtf)
+			modes += drm_gtf_modes_for_range(connector, timing);
+		break;
+	case EDID_DETAIL_STD_MODES:
+		/* Six modes per detailed section */
+		for (i = 0; i < 6; i++) {
+			struct std_timing *std;
+			struct drm_display_mode *newmode;
+
+			std = &data->data.timings[i];
+			newmode = drm_mode_std(dev, std, edid->revision,
+					       timing_level);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+		break;
+	case EDID_DETAIL_CVT_3BYTE:
+		modes += drm_cvt_modes(connector, timing);
+		break;
+	default:
+		break;
+	}
+
+	return modes;
+}
+
 /**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
  * @connector: attached connector
  * @edid: EDID block to scan
  * @quirks: quirks to apply
|
|||||||
static int add_detailed_info(struct drm_connector *connector,
|
static int add_detailed_info(struct drm_connector *connector,
|
||||||
struct edid *edid, u32 quirks)
|
struct edid *edid, u32 quirks)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = connector->dev;
|
int i, modes = 0;
|
||||||
int i, j, modes = 0;
|
|
||||||
int timing_level;
|
|
||||||
|
|
||||||
timing_level = standard_timing_level(edid);
|
|
||||||
|
|
||||||
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
|
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
|
||||||
struct detailed_timing *timing = &edid->detailed_timings[i];
|
struct detailed_timing *timing = &edid->detailed_timings[i];
|
||||||
struct detailed_non_pixel *data = &timing->data.other_data;
|
int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
|
||||||
struct drm_display_mode *newmode;
|
|
||||||
|
|
||||||
/* X server check is version 1.1 or higher */
|
/* In 1.0, only timings are allowed */
|
||||||
if (edid->version == 1 && edid->revision >= 1 &&
|
if (!timing->pixel_clock && edid->version == 1 &&
|
||||||
!timing->pixel_clock) {
|
edid->revision == 0)
|
||||||
/* Other timing or info */
|
continue;
|
||||||
switch (data->type) {
|
|
||||||
case EDID_DETAIL_MONITOR_SERIAL:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_STRING:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_RANGE:
|
|
||||||
/* Get monitor range data */
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_NAME:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_CPDATA:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_STD_MODES:
|
|
||||||
for (j = 0; j < 6; i++) {
|
|
||||||
struct std_timing *std;
|
|
||||||
struct drm_display_mode *newmode;
|
|
||||||
|
|
||||||
std = &data->data.timings[j];
|
modes += add_detailed_modes(connector, timing, edid, quirks,
|
||||||
newmode = drm_mode_std(dev, std,
|
preferred);
|
||||||
edid->revision,
|
|
||||||
timing_level);
|
|
||||||
if (newmode) {
|
|
||||||
drm_mode_probed_add(connector, newmode);
|
|
||||||
modes++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
newmode = drm_mode_detailed(dev, edid, timing, quirks);
|
|
||||||
if (!newmode)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* First detailed mode is preferred */
|
|
||||||
if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
|
|
||||||
newmode->type |= DRM_MODE_TYPE_PREFERRED;
|
|
||||||
drm_mode_probed_add(connector, newmode);
|
|
||||||
|
|
||||||
modes++;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return modes;
|
return modes;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* add_detailed_mode_eedid - get detailed mode info from addtional timing
|
* add_detailed_mode_eedid - get detailed mode info from addtional timing
|
||||||
* EDID block
|
* EDID block
|
||||||
@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
|
|||||||
static int add_detailed_info_eedid(struct drm_connector *connector,
|
static int add_detailed_info_eedid(struct drm_connector *connector,
|
||||||
struct edid *edid, u32 quirks)
|
struct edid *edid, u32 quirks)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = connector->dev;
|
int i, modes = 0;
|
||||||
int i, j, modes = 0;
|
|
||||||
char *edid_ext = NULL;
|
char *edid_ext = NULL;
|
||||||
struct detailed_timing *timing;
|
struct detailed_timing *timing;
|
||||||
struct detailed_non_pixel *data;
|
|
||||||
struct drm_display_mode *newmode;
|
|
||||||
int edid_ext_num;
|
int edid_ext_num;
|
||||||
int start_offset, end_offset;
|
int start_offset, end_offset;
|
||||||
int timing_level;
|
int timing_level;
|
||||||
@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
|
|||||||
for (i = start_offset; i < end_offset;
|
for (i = start_offset; i < end_offset;
|
||||||
i += sizeof(struct detailed_timing)) {
|
i += sizeof(struct detailed_timing)) {
|
||||||
timing = (struct detailed_timing *)(edid_ext + i);
|
timing = (struct detailed_timing *)(edid_ext + i);
|
||||||
data = &timing->data.other_data;
|
modes += add_detailed_modes(connector, timing, edid, quirks, 0);
|
||||||
/* Detailed mode timing */
|
|
||||||
if (timing->pixel_clock) {
|
|
||||||
newmode = drm_mode_detailed(dev, edid, timing, quirks);
|
|
||||||
if (!newmode)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
drm_mode_probed_add(connector, newmode);
|
|
||||||
|
|
||||||
modes++;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Other timing or info */
|
|
||||||
switch (data->type) {
|
|
||||||
case EDID_DETAIL_MONITOR_SERIAL:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_STRING:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_RANGE:
|
|
||||||
/* Get monitor range data */
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_NAME:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_MONITOR_CPDATA:
|
|
||||||
break;
|
|
||||||
case EDID_DETAIL_STD_MODES:
|
|
||||||
/* Five modes per detailed section */
|
|
||||||
for (j = 0; j < 5; i++) {
|
|
||||||
struct std_timing *std;
|
|
||||||
struct drm_display_mode *newmode;
|
|
||||||
|
|
||||||
std = &data->data.timings[j];
|
|
||||||
newmode = drm_mode_std(dev, std,
|
|
||||||
edid->revision,
|
|
||||||
timing_level);
|
|
||||||
if (newmode) {
|
|
||||||
drm_mode_probed_add(connector, newmode);
|
|
||||||
modes++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return modes;
|
return modes;
|
||||||
@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
|
|||||||
struct i2c_adapter *adapter,
|
struct i2c_adapter *adapter,
|
||||||
char *buf, int len)
|
char *buf, int len)
|
||||||
{
|
{
|
||||||
int ret;
|
int i;
|
||||||
|
|
||||||
ret = drm_do_probe_ddc_edid(adapter, buf, len);
|
for (i = 0; i < 4; i++) {
|
||||||
if (ret != 0) {
|
if (drm_do_probe_ddc_edid(adapter, buf, len))
|
||||||
goto end;
|
return -1;
|
||||||
|
if (edid_is_valid((struct edid *)buf))
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
if (!edid_is_valid((struct edid *)buf)) {
|
|
||||||
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
|
/* repeated checksum failures; warn, but carry on */
|
||||||
drm_get_connector_name(connector));
|
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
|
||||||
ret = -1;
|
drm_get_connector_name(connector));
|
||||||
}
|
return -1;
|
||||||
end:
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
|
|||||||
ptr->vdisplay > vdisplay)
|
ptr->vdisplay > vdisplay)
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
if (drm_mode_vrefresh(ptr) > 61)
|
||||||
|
continue;
|
||||||
mode = drm_mode_duplicate(dev, ptr);
|
mode = drm_mode_duplicate(dev, ptr);
|
||||||
if (mode) {
|
if (mode) {
|
||||||
drm_mode_probed_add(connector, mode);
|
drm_mode_probed_add(connector, mode);
|
||||||
|
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
				mutex_unlock(&dev->mode_config.mutex);
			}
		}
-		if (dpms_mode == DRM_MODE_DPMS_OFF) {
-			mutex_lock(&dev->mode_config.mutex);
-			crtc_funcs->dpms(crtc, dpms_mode);
-			mutex_unlock(&dev->mode_config.mutex);
-		}
+		mutex_lock(&dev->mode_config.mutex);
+		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+		mutex_unlock(&dev->mode_config.mutex);
		}
	}
 }
|
|||||||
int drm_fb_helper_blank(int blank, struct fb_info *info)
|
int drm_fb_helper_blank(int blank, struct fb_info *info)
|
||||||
{
|
{
|
||||||
switch (blank) {
|
switch (blank) {
|
||||||
|
/* Display: On; HSync: On, VSync: On */
|
||||||
case FB_BLANK_UNBLANK:
|
case FB_BLANK_UNBLANK:
|
||||||
drm_fb_helper_on(info);
|
drm_fb_helper_on(info);
|
||||||
break;
|
break;
|
||||||
|
/* Display: Off; HSync: On, VSync: On */
|
||||||
case FB_BLANK_NORMAL:
|
case FB_BLANK_NORMAL:
|
||||||
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
|
drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
|
||||||
break;
|
break;
|
||||||
|
/* Display: Off; HSync: Off, VSync: On */
|
||||||
case FB_BLANK_HSYNC_SUSPEND:
|
case FB_BLANK_HSYNC_SUSPEND:
|
||||||
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
|
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
|
||||||
break;
|
break;
|
||||||
|
/* Display: Off; HSync: On, VSync: Off */
|
||||||
case FB_BLANK_VSYNC_SUSPEND:
|
case FB_BLANK_VSYNC_SUSPEND:
|
||||||
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
|
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
|
||||||
break;
|
break;
|
||||||
|
/* Display: Off; HSync: Off, VSync: Off */
|
||||||
case FB_BLANK_POWERDOWN:
|
case FB_BLANK_POWERDOWN:
|
||||||
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
|
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
|
||||||
break;
|
break;
|
||||||
@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
|
|||||||
|
|
||||||
if (new_fb) {
|
if (new_fb) {
|
||||||
info->var.pixclock = 0;
|
info->var.pixclock = 0;
|
||||||
if (register_framebuffer(info) < 0)
|
ret = fb_alloc_cmap(&info->cmap, crtc->gamma_size, 0);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
if (register_framebuffer(info) < 0) {
|
||||||
|
fb_dealloc_cmap(&info->cmap);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
drm_fb_helper_set_par(info);
|
drm_fb_helper_set_par(info);
|
||||||
}
|
}
|
||||||
@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
|
|||||||
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
|
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
|
||||||
}
|
}
|
||||||
drm_fb_helper_crtc_free(helper);
|
drm_fb_helper_crtc_free(helper);
|
||||||
|
fb_dealloc_cmap(&helper->fb->fbdev->cmap);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(drm_fb_helper_free);
|
EXPORT_SYMBOL(drm_fb_helper_free);
|
||||||
|
|
||||||
|
@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,

	INIT_LIST_HEAD(&priv->lhead);
	INIT_LIST_HEAD(&priv->fbs);
+	INIT_LIST_HEAD(&priv->event_list);
+	init_waitqueue_head(&priv->event_wait);
+	priv->event_space = 4096; /* set aside 4k for event buffer */

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_open(dev, priv);
|
|||||||
goto out_free;
|
goto out_free;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
mutex_lock(&dev->struct_mutex);
|
||||||
|
if (dev->driver->master_set) {
|
||||||
|
ret = dev->driver->master_set(dev, priv, true);
|
||||||
|
if (ret) {
|
||||||
|
/* drop both references if this fails */
|
||||||
|
drm_master_put(&priv->minor->master);
|
||||||
|
drm_master_put(&priv->master);
|
||||||
|
mutex_unlock(&dev->struct_mutex);
|
||||||
|
goto out_free;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mutex_unlock(&dev->struct_mutex);
|
||||||
} else {
|
} else {
|
||||||
/* get a reference to the master */
|
/* get a reference to the master */
|
||||||
priv->master = drm_master_get(priv->minor->master);
|
priv->master = drm_master_get(priv->minor->master);
|
||||||
@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void drm_events_release(struct drm_file *file_priv)
|
||||||
|
{
|
||||||
|
struct drm_device *dev = file_priv->minor->dev;
|
||||||
|
struct drm_pending_event *e, *et;
|
||||||
|
struct drm_pending_vblank_event *v, *vt;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&dev->event_lock, flags);
|
||||||
|
|
||||||
|
/* Remove pending flips */
|
||||||
|
list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
|
||||||
|
if (v->base.file_priv == file_priv) {
|
||||||
|
list_del(&v->base.link);
|
||||||
|
drm_vblank_put(dev, v->pipe);
|
||||||
|
v->base.destroy(&v->base);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Remove unconsumed events */
|
||||||
|
list_for_each_entry_safe(e, et, &file_priv->event_list, link)
|
||||||
|
e->destroy(e);
|
||||||
|
|
||||||
|
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Release file.
|
* Release file.
|
||||||
*
|
*
|
||||||
@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||||||
if (file_priv->minor->master)
|
if (file_priv->minor->master)
|
||||||
drm_master_release(dev, filp);
|
drm_master_release(dev, filp);
|
||||||
|
|
||||||
|
drm_events_release(file_priv);
|
||||||
|
|
||||||
if (dev->driver->driver_features & DRIVER_GEM)
|
if (dev->driver->driver_features & DRIVER_GEM)
|
||||||
drm_gem_release(dev, file_priv);
|
drm_gem_release(dev, file_priv);
|
||||||
|
|
||||||
@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||||||
|
|
||||||
if (file_priv->minor->master == file_priv->master) {
|
if (file_priv->minor->master == file_priv->master) {
|
||||||
/* drop the reference held my the minor */
|
/* drop the reference held my the minor */
|
||||||
|
if (dev->driver->master_drop)
|
||||||
|
dev->driver->master_drop(dev, file_priv, true);
|
||||||
drm_master_put(&file_priv->minor->master);
|
drm_master_put(&file_priv->minor->master);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(drm_release);
|
EXPORT_SYMBOL(drm_release);
|
||||||
|
|
||||||
/** No-op. */
|
static bool
|
||||||
|
drm_dequeue_event(struct drm_file *file_priv,
|
||||||
|
size_t total, size_t max, struct drm_pending_event **out)
|
||||||
|
{
|
||||||
|
struct drm_device *dev = file_priv->minor->dev;
|
||||||
|
struct drm_pending_event *e;
|
||||||
|
unsigned long flags;
|
||||||
|
bool ret = false;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&dev->event_lock, flags);
|
||||||
|
|
||||||
|
*out = NULL;
|
||||||
|
if (list_empty(&file_priv->event_list))
|
||||||
|
goto out;
|
||||||
|
e = list_first_entry(&file_priv->event_list,
|
||||||
|
struct drm_pending_event, link);
|
||||||
|
if (e->event->length + total > max)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
file_priv->event_space += e->event->length;
|
||||||
|
list_del(&e->link);
|
||||||
|
*out = e;
|
||||||
|
ret = true;
|
||||||
|
|
||||||
|
out:
|
||||||
|
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
ssize_t drm_read(struct file *filp, char __user *buffer,
|
||||||
|
size_t count, loff_t *offset)
|
||||||
|
{
|
||||||
|
struct drm_file *file_priv = filp->private_data;
|
||||||
|
struct drm_pending_event *e;
|
||||||
|
size_t total;
|
||||||
|
ssize_t ret;
|
||||||
|
|
||||||
|
ret = wait_event_interruptible(file_priv->event_wait,
|
||||||
|
!list_empty(&file_priv->event_list));
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
total = 0;
|
||||||
|
while (drm_dequeue_event(file_priv, total, count, &e)) {
|
||||||
|
if (copy_to_user(buffer + total,
|
||||||
|
e->event, e->event->length)) {
|
||||||
|
total = -EFAULT;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
total += e->event->length;
|
||||||
|
e->destroy(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return total;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(drm_read);
|
||||||
|
|
||||||
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
|
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
|
||||||
{
|
{
|
||||||
return 0;
|
struct drm_file *file_priv = filp->private_data;
|
||||||
|
unsigned int mask = 0;
|
||||||
|
|
||||||
|
poll_wait(filp, &file_priv->event_wait, wait);
|
||||||
|
|
||||||
|
if (!list_empty(&file_priv->event_list))
|
||||||
|
mask |= POLLIN | POLLRDNORM;
|
||||||
|
|
||||||
|
return mask;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(drm_poll);
|
EXPORT_SYMBOL(drm_poll);
|
||||||
|
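With drm_poll() and drm_read() wired up, a client can wait on the DRM fd and drain completed events. A hedged sketch of the consuming side; struct drm_event and struct drm_event_vblank are the UAPI event headers added alongside this series, and the buffer size here is arbitrary:

#include <poll.h>
#include <string.h>
#include <unistd.h>
#include <drm/drm.h>

static void drain_events(int drm_fd)
{
	char buf[1024];
	struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
	ssize_t len, i;

	poll(&pfd, 1, -1);			/* sleeps in drm_poll() */
	len = read(drm_fd, buf, sizeof(buf));	/* drains the queue via drm_read() */

	for (i = 0; i + (ssize_t)sizeof(struct drm_event) <= len; ) {
		struct drm_event ev;

		memcpy(&ev, buf + i, sizeof(ev));
		if (ev.type == DRM_EVENT_VBLANK) {
			struct drm_event_vblank vbl;
			memcpy(&vbl, buf + i, sizeof(vbl));
			/* vbl.sequence, vbl.tv_sec/tv_usec and vbl.user_data
			 * identify which flip or vblank request completed */
		}
		i += ev.length;		/* events are variable length */
	}
}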
@@ -550,6 +550,63 @@ out:
	return ret;
 }

+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+				  union drm_wait_vblank *vblwait,
+				  struct drm_file *file_priv)
+{
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	e = kzalloc(sizeof *e, GFP_KERNEL);
+	if (e == NULL)
+		return -ENOMEM;
+
+	e->pipe = pipe;
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof e->event;
+	e->event.user_data = vblwait->request.signal;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file_priv;
+	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	do_gettimeofday(&now);
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (file_priv->event_space < sizeof e->event) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+		return -ENOMEM;
+	}
+
+	file_priv->event_space -= sizeof e->event;
+	seq = drm_vblank_count(dev, pipe);
+	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+		vblwait->request.sequence = seq + 1;
+		vblwait->reply.sequence = vblwait->request.sequence;
+	}
+
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+		  vblwait->request.sequence, seq, pipe);
+
+	e->event.sequence = vblwait->request.sequence;
+	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		drm_vblank_put(dev, e->pipe);
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	} else {
+		list_add_tail(&e->base.link, &dev->vblank_event_list);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return 0;
+}
+
 /**
  * Wait for VBLANK.
  *
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (flags & _DRM_VBLANK_EVENT)
|
||||||
|
return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
|
||||||
|
|
||||||
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
|
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
|
||||||
(seq - vblwait->request.sequence) <= (1<<23)) {
|
(seq - vblwait->request.sequence) <= (1<<23)) {
|
||||||
vblwait->request.sequence = seq + 1;
|
vblwait->request.sequence = seq + 1;
|
||||||
@ -641,6 +701,38 @@ done:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void drm_handle_vblank_events(struct drm_device *dev, int crtc)
|
||||||
|
{
|
||||||
|
struct drm_pending_vblank_event *e, *t;
|
||||||
|
struct timeval now;
|
||||||
|
unsigned long flags;
|
||||||
|
unsigned int seq;
|
||||||
|
|
||||||
|
do_gettimeofday(&now);
|
||||||
|
seq = drm_vblank_count(dev, crtc);
|
||||||
|
|
||||||
|
spin_lock_irqsave(&dev->event_lock, flags);
|
||||||
|
|
||||||
|
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
|
||||||
|
if (e->pipe != crtc)
|
||||||
|
continue;
|
||||||
|
if ((seq - e->event.sequence) > (1<<23))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
DRM_DEBUG("vblank event on %d, current %d\n",
|
||||||
|
e->event.sequence, seq);
|
||||||
|
|
||||||
|
e->event.sequence = seq;
|
||||||
|
e->event.tv_sec = now.tv_sec;
|
||||||
|
e->event.tv_usec = now.tv_usec;
|
||||||
|
drm_vblank_put(dev, e->pipe);
|
||||||
|
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
|
||||||
|
wake_up_interruptible(&e->base.file_priv->event_wait);
|
||||||
|
}
|
||||||
|
|
||||||
|
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* drm_handle_vblank - handle a vblank event
|
* drm_handle_vblank - handle a vblank event
|
||||||
* @dev: DRM device
|
* @dev: DRM device
|
||||||
@ -651,7 +743,11 @@ done:
|
|||||||
*/
|
*/
|
||||||
void drm_handle_vblank(struct drm_device *dev, int crtc)
|
void drm_handle_vblank(struct drm_device *dev, int crtc)
|
||||||
{
|
{
|
||||||
|
if (!dev->num_crtcs)
|
||||||
|
return;
|
||||||
|
|
||||||
atomic_inc(&dev->_vblank_count[crtc]);
|
atomic_inc(&dev->_vblank_count[crtc]);
|
||||||
DRM_WAKEUP(&dev->vbl_queue[crtc]);
|
DRM_WAKEUP(&dev->vbl_queue[crtc]);
|
||||||
|
drm_handle_vblank_events(dev, crtc);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(drm_handle_vblank);
|
EXPORT_SYMBOL(drm_handle_vblank);
|
||||||
|
@@ -395,7 +395,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
		else
			total_used += entry->size;
	}
-	seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
+	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_height);

+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+	unsigned int calc_val;
+
+	if (mode->hsync)
+		return mode->hsync;
+
+	if (mode->htotal < 0)
+		return 0;
+
+	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+	calc_val += 500;				/* round to 1000Hz */
+	calc_val /= 1000;				/* truncate to kHz */
+
+	return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
  * @mode: mode
|
|||||||
* LOCKING:
|
* LOCKING:
|
||||||
* None.
|
* None.
|
||||||
*
|
*
|
||||||
* Return @mode's vrefresh rate or calculate it if necessary.
|
* Return @mode's vrefresh rate in Hz or calculate it if necessary.
|
||||||
*
|
*
|
||||||
* FIXME: why is this needed? shouldn't vrefresh be set already?
|
* FIXME: why is this needed? shouldn't vrefresh be set already?
|
||||||
*
|
*
|
||||||
|
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
	kref_get(&master->refcount);
	return master;
 }
+EXPORT_SYMBOL(drm_master_get);

 static void drm_master_destroy(struct kref *kref)
 {
|
|||||||
kref_put(&(*master)->refcount, drm_master_destroy);
|
kref_put(&(*master)->refcount, drm_master_destroy);
|
||||||
*master = NULL;
|
*master = NULL;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(drm_master_put);
|
||||||
|
|
||||||
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
|
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
|
||||||
struct drm_file *file_priv)
|
struct drm_file *file_priv)
|
||||||
{
|
{
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
if (file_priv->is_master)
|
if (file_priv->is_master)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
|
|||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&dev->struct_mutex);
|
||||||
file_priv->minor->master = drm_master_get(file_priv->master);
|
file_priv->minor->master = drm_master_get(file_priv->master);
|
||||||
file_priv->is_master = 1;
|
file_priv->is_master = 1;
|
||||||
|
if (dev->driver->master_set) {
|
||||||
|
ret = dev->driver->master_set(dev, file_priv, false);
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
file_priv->is_master = 0;
|
||||||
|
drm_master_put(&file_priv->minor->master);
|
||||||
|
}
|
||||||
|
}
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&dev->struct_mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&dev->struct_mutex);
|
||||||
|
if (dev->driver->master_drop)
|
||||||
|
dev->driver->master_drop(dev, file_priv, false);
|
||||||
drm_master_put(&file_priv->minor->master);
|
drm_master_put(&file_priv->minor->master);
|
||||||
file_priv->is_master = 0;
|
file_priv->is_master = 0;
|
||||||
mutex_unlock(&dev->struct_mutex);
|
mutex_unlock(&dev->struct_mutex);
|
||||||
@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
|
|||||||
INIT_LIST_HEAD(&dev->ctxlist);
|
INIT_LIST_HEAD(&dev->ctxlist);
|
||||||
INIT_LIST_HEAD(&dev->vmalist);
|
INIT_LIST_HEAD(&dev->vmalist);
|
||||||
INIT_LIST_HEAD(&dev->maplist);
|
INIT_LIST_HEAD(&dev->maplist);
|
||||||
|
INIT_LIST_HEAD(&dev->vblank_event_list);
|
||||||
|
|
||||||
spin_lock_init(&dev->count_lock);
|
spin_lock_init(&dev->count_lock);
|
||||||
spin_lock_init(&dev->drw_lock);
|
spin_lock_init(&dev->drw_lock);
|
||||||
|
spin_lock_init(&dev->event_lock);
|
||||||
init_timer(&dev->timer);
|
init_timer(&dev->timer);
|
||||||
mutex_init(&dev->struct_mutex);
|
mutex_init(&dev->struct_mutex);
|
||||||
mutex_init(&dev->ctxlist_mutex);
|
mutex_init(&dev->ctxlist_mutex);
|
||||||
|
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
	  intel_lvds.o \
	  intel_bios.o \
	  intel_dp.o \
-	  intel_dp_i2c.o \
	  intel_hdmi.o \
	  intel_sdvo.o \
	  intel_modes.o \
@@ -333,6 +333,7 @@ static struct drm_driver driver = {
	 .mmap = drm_gem_mmap,
	 .poll = drm_poll,
	 .fasync = drm_fasync,
+	 .read = drm_read,
 #ifdef CONFIG_COMPAT
	 .compat_ioctl = i915_compat_ioctl,
 #endif
@@ -32,7 +32,7 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"

 #include "drm_crtc_helper.h"

@@ -33,7 +33,7 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"

 #define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
@@ -382,17 +382,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
 }

 static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
-		    uint8_t *send, int send_bytes,
-		    uint8_t *recv, int recv_bytes)
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
 {
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp_priv *dp_priv = container_of(adapter,
						     struct intel_dp_priv,
						     adapter);
	struct intel_output *intel_output = dp_priv->intel_output;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	int msg_bytes;
+	int reply_bytes;
+	int ret;

-	return intel_dp_aux_ch(intel_output,
-			       send, send_bytes, recv, recv_bytes);
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (;;) {
+		ret = intel_dp_aux_ch(intel_output,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG("aux_ch failed %d\n", ret);
+			return ret;
+		}
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG("aux_ch nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG("aux_ch defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
 }

 static int
@@ -3,6 +3,7 @@

 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
-	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o

 obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -275,9 +275,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
-		if (unlikely(bo->ttm == NULL))
+		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
+		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 #endif
	return tmp;
 }
+EXPORT_SYMBOL(ttm_io_prot);

 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
drivers/gpu/drm/ttm/ttm_execbuf_util.c (new file, 117 lines)
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		entry->reserved = false;
+		ttm_bo_unreserve(bo);
+	}
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+	struct ttm_validate_buffer *entry;
+	int ret;
+
+retry:
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		entry->reserved = false;
+		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+		if (ret != 0) {
+			ttm_eu_backoff_reservation(list);
+			if (ret == -EAGAIN) {
+				ret = ttm_bo_wait_unreserved(bo, true);
+				if (unlikely(ret != 0))
+					return ret;
+				goto retry;
+			} else
+				return ret;
+		}
+
+		entry->reserved = true;
+		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			ttm_eu_backoff_reservation(list);
+			ret = ttm_bo_wait_cpu(bo, false);
+			if (ret)
+				return ret;
+			goto retry;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		struct ttm_bo_driver *driver = bo->bdev->driver;
+		void *old_sync_obj;
+
+		spin_lock(&bo->lock);
+		old_sync_obj = bo->sync_obj;
+		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		bo->sync_obj_arg = entry->new_sync_obj_arg;
+		spin_unlock(&bo->lock);
+		ttm_bo_unreserve(bo);
+		entry->reserved = false;
+		if (old_sync_obj)
+			driver->sync_obj_unref(&old_sync_obj);
+	}
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
311
drivers/gpu/drm/ttm/ttm_lock.c
Normal file
311
drivers/gpu/drm/ttm/ttm_lock.c
Normal file
@ -0,0 +1,311 @@
|
|||||||
|
/**************************************************************************
|
||||||
|
*
|
||||||
|
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
|
||||||
|
* All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
* copy of this software and associated documentation files (the
|
||||||
|
* "Software"), to deal in the Software without restriction, including
|
||||||
|
* without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
* distribute, sub license, and/or sell copies of the Software, and to
|
||||||
|
* permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
* the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice (including the
|
||||||
|
* next paragraph) shall be included in all copies or substantial portions
|
||||||
|
* of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||||
|
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||||
|
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||||
|
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
*
|
||||||
|
**************************************************************************/
|
||||||
|
/*
|
||||||
|
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "ttm/ttm_lock.h"
|
||||||
|
#include "ttm/ttm_module.h"
|
||||||
|
#include <asm/atomic.h>
|
||||||
|
#include <linux/errno.h>
|
||||||
|
#include <linux/wait.h>
|
||||||
|
#include <linux/sched.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
|
||||||
|
#define TTM_WRITE_LOCK_PENDING (1 << 0)
|
||||||
|
#define TTM_VT_LOCK_PENDING (1 << 1)
|
||||||
|
#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
|
||||||
|
#define TTM_VT_LOCK (1 << 3)
|
||||||
|
#define TTM_SUSPEND_LOCK (1 << 4)
|
||||||
|
|
||||||
|
void ttm_lock_init(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
spin_lock_init(&lock->lock);
|
||||||
|
init_waitqueue_head(&lock->queue);
|
||||||
|
lock->rw = 0;
|
||||||
|
lock->flags = 0;
|
||||||
|
lock->kill_takers = false;
|
||||||
|
lock->signal = SIGKILL;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_lock_init);
|
||||||
|
|
||||||
|
void ttm_read_unlock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
if (--lock->rw == 0)
|
||||||
|
wake_up_all(&lock->queue);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_read_unlock);
|
||||||
|
|
||||||
|
static bool __ttm_read_lock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
bool locked = false;
|
||||||
|
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
if (unlikely(lock->kill_takers)) {
|
||||||
|
send_sig(lock->signal, current, 0);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (lock->rw >= 0 && lock->flags == 0) {
|
||||||
|
++lock->rw;
|
||||||
|
locked = true;
|
||||||
|
}
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return locked;
|
||||||
|
}
|
||||||
|
|
||||||
|
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
|
||||||
|
{
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
if (interruptible)
|
||||||
|
ret = wait_event_interruptible(lock->queue,
|
||||||
|
__ttm_read_lock(lock));
|
||||||
|
else
|
||||||
|
wait_event(lock->queue, __ttm_read_lock(lock));
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_read_lock);
|
||||||
|
|
||||||
|
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
|
||||||
|
{
|
||||||
|
bool block = true;
|
||||||
|
|
||||||
|
*locked = false;
|
||||||
|
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
if (unlikely(lock->kill_takers)) {
|
||||||
|
send_sig(lock->signal, current, 0);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (lock->rw >= 0 && lock->flags == 0) {
|
||||||
|
++lock->rw;
|
||||||
|
block = false;
|
||||||
|
*locked = true;
|
||||||
|
} else if (lock->flags == 0) {
|
||||||
|
block = false;
|
||||||
|
}
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
|
||||||
|
return !block;
|
||||||
|
}
|
||||||
|
|
||||||
|
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
|
||||||
|
{
|
||||||
|
int ret = 0;
|
||||||
|
bool locked;
|
||||||
|
|
||||||
|
if (interruptible)
|
||||||
|
ret = wait_event_interruptible
|
||||||
|
(lock->queue, __ttm_read_trylock(lock, &locked));
|
||||||
|
else
|
||||||
|
wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
|
||||||
|
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
BUG_ON(locked);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (locked) ? 0 : -EBUSY;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ttm_write_unlock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
lock->rw = 0;
|
||||||
|
wake_up_all(&lock->queue);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_write_unlock);
|
||||||
|
|
||||||
|
static bool __ttm_write_lock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
bool locked = false;
|
||||||
|
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
if (unlikely(lock->kill_takers)) {
|
||||||
|
send_sig(lock->signal, current, 0);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
|
||||||
|
lock->rw = -1;
|
||||||
|
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
|
||||||
|
locked = true;
|
||||||
|
} else {
|
||||||
|
lock->flags |= TTM_WRITE_LOCK_PENDING;
|
||||||
|
}
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return locked;
|
||||||
|
}
|
||||||
|
|
||||||
|
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
|
||||||
|
{
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
if (interruptible) {
|
||||||
|
ret = wait_event_interruptible(lock->queue,
|
||||||
|
__ttm_write_lock(lock));
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
|
||||||
|
wake_up_all(&lock->queue);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
}
|
||||||
|
} else
|
||||||
|
wait_event(lock->queue, __ttm_read_lock(lock));
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_write_lock);
|
||||||
|
|
||||||
|
void ttm_write_lock_downgrade(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
lock->rw = 1;
|
||||||
|
wake_up_all(&lock->queue);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __ttm_vt_unlock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
if (unlikely(!(lock->flags & TTM_VT_LOCK)))
|
||||||
|
ret = -EINVAL;
|
||||||
|
lock->flags &= ~TTM_VT_LOCK;
|
||||||
|
wake_up_all(&lock->queue);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
printk(KERN_INFO TTM_PFX "vt unlock.\n");
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
|
||||||
|
{
|
||||||
|
struct ttm_base_object *base = *p_base;
|
||||||
|
struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
*p_base = NULL;
|
||||||
|
ret = __ttm_vt_unlock(lock);
|
||||||
|
BUG_ON(ret != 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool __ttm_vt_lock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
bool locked = false;
|
||||||
|
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
if (lock->rw == 0) {
|
||||||
|
lock->flags &= ~TTM_VT_LOCK_PENDING;
|
||||||
|
lock->flags |= TTM_VT_LOCK;
|
||||||
|
locked = true;
|
||||||
|
} else {
|
||||||
|
lock->flags |= TTM_VT_LOCK_PENDING;
|
||||||
|
}
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return locked;
|
||||||
|
}
|
||||||
|
|
||||||
|
int ttm_vt_lock(struct ttm_lock *lock,
|
||||||
|
bool interruptible,
|
||||||
|
struct ttm_object_file *tfile)
|
||||||
|
{
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
if (interruptible) {
|
||||||
|
ret = wait_event_interruptible(lock->queue,
|
||||||
|
__ttm_vt_lock(lock));
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
lock->flags &= ~TTM_VT_LOCK_PENDING;
|
||||||
|
wake_up_all(&lock->queue);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
} else
|
||||||
|
wait_event(lock->queue, __ttm_vt_lock(lock));
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Add a base-object, the destructor of which will
|
||||||
|
* make sure the lock is released if the client dies
|
||||||
|
* while holding it.
|
||||||
|
*/
|
||||||
|
|
||||||
|
ret = ttm_base_object_init(tfile, &lock->base, false,
|
||||||
|
ttm_lock_type, &ttm_vt_lock_remove, NULL);
|
||||||
|
if (ret)
|
||||||
|
(void)__ttm_vt_unlock(lock);
|
||||||
|
else {
|
||||||
|
lock->vt_holder = tfile;
|
||||||
|
printk(KERN_INFO TTM_PFX "vt lock.\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_vt_lock);
|
||||||
|
|
||||||
|
int ttm_vt_unlock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
return ttm_ref_object_base_unref(lock->vt_holder,
|
||||||
|
lock->base.hash.key, TTM_REF_USAGE);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_vt_unlock);
|
||||||
|
|
||||||
|
void ttm_suspend_unlock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
lock->flags &= ~TTM_SUSPEND_LOCK;
|
||||||
|
wake_up_all(&lock->queue);
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool __ttm_suspend_lock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
bool locked = false;
|
||||||
|
|
||||||
|
spin_lock(&lock->lock);
|
||||||
|
if (lock->rw == 0) {
|
||||||
|
lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
|
||||||
|
lock->flags |= TTM_SUSPEND_LOCK;
|
||||||
|
locked = true;
|
||||||
|
} else {
|
||||||
|
lock->flags |= TTM_SUSPEND_LOCK_PENDING;
|
||||||
|
}
|
||||||
|
spin_unlock(&lock->lock);
|
||||||
|
return locked;
|
||||||
|
}
|
||||||
|
|
||||||
|
void ttm_suspend_lock(struct ttm_lock *lock)
|
||||||
|
{
|
||||||
|
wait_event(lock->queue, __ttm_suspend_lock(lock));
|
||||||
|
}
|
@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
|
|||||||
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
|
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
|
||||||
const struct sysinfo *si)
|
const struct sysinfo *si)
|
||||||
{
|
{
|
||||||
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
|
struct ttm_mem_zone *zone;
|
||||||
uint64_t mem;
|
uint64_t mem;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (unlikely(!zone))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (si->totalhigh == 0)
|
if (si->totalhigh == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
|
||||||
|
if (unlikely(!zone))
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
mem = si->totalram;
|
mem = si->totalram;
|
||||||
mem *= si->mem_unit;
|
mem *= si->mem_unit;
|
||||||
|
|
||||||
@ -460,6 +461,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
|
|||||||
{
|
{
|
||||||
return ttm_mem_global_free_zone(glob, NULL, amount);
|
return ttm_mem_global_free_zone(glob, NULL, amount);
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_mem_global_free);
|
||||||
|
|
||||||
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
|
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
|
||||||
struct ttm_mem_zone *single_zone,
|
struct ttm_mem_zone *single_zone,
|
||||||
@ -533,6 +535,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
|
|||||||
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
|
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
|
||||||
interruptible);
|
interruptible);
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_mem_global_alloc);
|
||||||
|
|
||||||
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
|
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
|
||||||
struct page *page,
|
struct page *page,
|
||||||
@ -588,3 +591,4 @@ size_t ttm_round_pot(size_t size)
|
|||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_round_pot);
|
||||||
|
452
drivers/gpu/drm/ttm/ttm_object.c
Normal file
452
drivers/gpu/drm/ttm/ttm_object.c
Normal file
@ -0,0 +1,452 @@
|
|||||||
|
/**************************************************************************
|
||||||
|
*
|
||||||
|
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
|
||||||
|
* All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
* copy of this software and associated documentation files (the
|
||||||
|
* "Software"), to deal in the Software without restriction, including
|
||||||
|
* without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
* distribute, sub license, and/or sell copies of the Software, and to
|
||||||
|
* permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
* the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice (including the
|
||||||
|
* next paragraph) shall be included in all copies or substantial portions
|
||||||
|
* of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||||
|
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||||
|
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||||
|
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
*
|
||||||
|
**************************************************************************/
|
||||||
|
/*
|
||||||
|
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||||
|
*/
|
||||||
|
/** @file ttm_ref_object.c
|
||||||
|
*
|
||||||
|
* Base- and reference object implementation for the various
|
||||||
|
* ttm objects. Implements reference counting, minimal security checks
|
||||||
|
* and release on file close.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct ttm_object_file
|
||||||
|
*
|
||||||
|
* @tdev: Pointer to the ttm_object_device.
|
||||||
|
*
|
||||||
|
* @lock: Lock that protects the ref_list list and the
|
||||||
|
* ref_hash hash tables.
|
||||||
|
*
|
||||||
|
* @ref_list: List of ttm_ref_objects to be destroyed at
|
||||||
|
* file release.
|
||||||
|
*
|
||||||
|
* @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
|
||||||
|
* for fast lookup of ref objects given a base object.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "ttm/ttm_object.h"
|
||||||
|
#include "ttm/ttm_module.h"
|
||||||
|
#include <linux/list.h>
|
||||||
|
#include <linux/spinlock.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <asm/atomic.h>
|
||||||
|
|
||||||
|
struct ttm_object_file {
|
||||||
|
struct ttm_object_device *tdev;
|
||||||
|
rwlock_t lock;
|
||||||
|
struct list_head ref_list;
|
||||||
|
struct drm_open_hash ref_hash[TTM_REF_NUM];
|
||||||
|
struct kref refcount;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct ttm_object_device
|
||||||
|
*
|
||||||
|
* @object_lock: lock that protects the object_hash hash table.
|
||||||
|
*
|
||||||
|
* @object_hash: hash table for fast lookup of object global names.
|
||||||
|
*
|
||||||
|
* @object_count: Per device object count.
|
||||||
|
*
|
||||||
|
* This is the per-device data structure needed for ttm object management.
|
||||||
|
*/
|
||||||
|
|
||||||
|
struct ttm_object_device {
|
||||||
|
rwlock_t object_lock;
|
||||||
|
struct drm_open_hash object_hash;
|
||||||
|
atomic_t object_count;
|
||||||
|
struct ttm_mem_global *mem_glob;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct ttm_ref_object
|
||||||
|
*
|
||||||
|
* @hash: Hash entry for the per-file object reference hash.
|
||||||
|
*
|
||||||
|
* @head: List entry for the per-file list of ref-objects.
|
||||||
|
*
|
||||||
|
* @kref: Ref count.
|
||||||
|
*
|
||||||
|
* @obj: Base object this ref object is referencing.
|
||||||
|
*
|
||||||
|
* @ref_type: Type of ref object.
|
||||||
|
*
|
||||||
|
* This is similar to an idr object, but it also has a hash table entry
|
||||||
|
* that allows lookup with a pointer to the referenced object as a key. In
|
||||||
|
* that way, one can easily detect whether a base object is referenced by
|
||||||
|
* a particular ttm_object_file. It also carries a ref count to avoid creating
|
||||||
|
* multiple ref objects if a ttm_object_file references the same base
|
||||||
|
* object more than once.
|
||||||
|
*/
|
||||||
|
|
||||||
|
struct ttm_ref_object {
|
||||||
|
struct drm_hash_item hash;
|
||||||
|
struct list_head head;
|
||||||
|
struct kref kref;
|
||||||
|
struct ttm_base_object *obj;
|
||||||
|
enum ttm_ref_type ref_type;
|
||||||
|
struct ttm_object_file *tfile;
|
||||||
|
};
|
||||||
|
|
||||||
|
static inline struct ttm_object_file *
|
||||||
|
ttm_object_file_ref(struct ttm_object_file *tfile)
|
||||||
|
{
|
||||||
|
kref_get(&tfile->refcount);
|
||||||
|
return tfile;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void ttm_object_file_destroy(struct kref *kref)
|
||||||
|
{
|
||||||
|
struct ttm_object_file *tfile =
|
||||||
|
container_of(kref, struct ttm_object_file, refcount);
|
||||||
|
|
||||||
|
kfree(tfile);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
|
||||||
|
{
|
||||||
|
struct ttm_object_file *tfile = *p_tfile;
|
||||||
|
|
||||||
|
*p_tfile = NULL;
|
||||||
|
kref_put(&tfile->refcount, ttm_object_file_destroy);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int ttm_base_object_init(struct ttm_object_file *tfile,
|
||||||
|
struct ttm_base_object *base,
|
||||||
|
bool shareable,
|
||||||
|
enum ttm_object_type object_type,
|
||||||
|
void (*refcount_release) (struct ttm_base_object **),
|
||||||
|
void (*ref_obj_release) (struct ttm_base_object *,
|
||||||
|
enum ttm_ref_type ref_type))
|
||||||
|
{
|
||||||
|
struct ttm_object_device *tdev = tfile->tdev;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
base->shareable = shareable;
|
||||||
|
base->tfile = ttm_object_file_ref(tfile);
|
||||||
|
base->refcount_release = refcount_release;
|
||||||
|
base->ref_obj_release = ref_obj_release;
|
||||||
|
base->object_type = object_type;
|
||||||
|
write_lock(&tdev->object_lock);
|
||||||
|
kref_init(&base->refcount);
|
||||||
|
ret = drm_ht_just_insert_please(&tdev->object_hash,
|
||||||
|
&base->hash,
|
||||||
|
(unsigned long)base, 31, 0, 0);
|
||||||
|
write_unlock(&tdev->object_lock);
|
||||||
|
if (unlikely(ret != 0))
|
||||||
|
goto out_err0;
|
||||||
|
|
||||||
|
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
|
||||||
|
if (unlikely(ret != 0))
|
||||||
|
goto out_err1;
|
||||||
|
|
||||||
|
ttm_base_object_unref(&base);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
out_err1:
|
||||||
|
(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
|
||||||
|
out_err0:
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_base_object_init);
|
||||||
|
|
||||||
|
static void ttm_release_base(struct kref *kref)
|
||||||
|
{
|
||||||
|
struct ttm_base_object *base =
|
||||||
|
container_of(kref, struct ttm_base_object, refcount);
|
||||||
|
struct ttm_object_device *tdev = base->tfile->tdev;
|
||||||
|
|
||||||
|
(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
|
||||||
|
write_unlock(&tdev->object_lock);
|
||||||
|
if (base->refcount_release) {
|
||||||
|
ttm_object_file_unref(&base->tfile);
|
||||||
|
base->refcount_release(&base);
|
||||||
|
}
|
||||||
|
write_lock(&tdev->object_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ttm_base_object_unref(struct ttm_base_object **p_base)
|
||||||
|
{
|
||||||
|
struct ttm_base_object *base = *p_base;
|
||||||
|
struct ttm_object_device *tdev = base->tfile->tdev;
|
||||||
|
|
||||||
|
*p_base = NULL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Need to take the lock here to avoid racing with
|
||||||
|
* users trying to look up the object.
|
||||||
|
*/
|
||||||
|
|
||||||
|
write_lock(&tdev->object_lock);
|
||||||
|
(void)kref_put(&base->refcount, &ttm_release_base);
|
||||||
|
write_unlock(&tdev->object_lock);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_base_object_unref);
|
||||||
|
|
||||||
|
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
|
||||||
|
uint32_t key)
|
||||||
|
{
|
||||||
|
struct ttm_object_device *tdev = tfile->tdev;
|
||||||
|
struct ttm_base_object *base;
|
||||||
|
struct drm_hash_item *hash;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
read_lock(&tdev->object_lock);
|
||||||
|
ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
|
||||||
|
|
||||||
|
if (likely(ret == 0)) {
|
||||||
|
base = drm_hash_entry(hash, struct ttm_base_object, hash);
|
||||||
|
kref_get(&base->refcount);
|
||||||
|
}
|
||||||
|
read_unlock(&tdev->object_lock);
|
||||||
|
|
||||||
|
if (unlikely(ret != 0))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
if (tfile != base->tfile && !base->shareable) {
|
||||||
|
printk(KERN_ERR TTM_PFX
|
||||||
|
"Attempted access of non-shareable object.\n");
|
||||||
|
ttm_base_object_unref(&base);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return base;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_base_object_lookup);
|
||||||
|
|
||||||
|
int ttm_ref_object_add(struct ttm_object_file *tfile,
|
||||||
|
struct ttm_base_object *base,
|
||||||
|
enum ttm_ref_type ref_type, bool *existed)
|
||||||
|
{
|
||||||
|
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
|
||||||
|
struct ttm_ref_object *ref;
|
||||||
|
struct drm_hash_item *hash;
|
||||||
|
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
|
||||||
|
int ret = -EINVAL;
|
||||||
|
|
||||||
|
if (existed != NULL)
|
||||||
|
*existed = true;
|
||||||
|
|
||||||
|
while (ret == -EINVAL) {
|
||||||
|
read_lock(&tfile->lock);
|
||||||
|
ret = drm_ht_find_item(ht, base->hash.key, &hash);
|
||||||
|
|
||||||
|
if (ret == 0) {
|
||||||
|
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
|
||||||
|
kref_get(&ref->kref);
|
||||||
|
read_unlock(&tfile->lock);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
read_unlock(&tfile->lock);
|
||||||
|
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
|
||||||
|
false, false);
|
||||||
|
if (unlikely(ret != 0))
|
||||||
|
return ret;
|
||||||
|
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
|
||||||
|
if (unlikely(ref == NULL)) {
|
||||||
|
ttm_mem_global_free(mem_glob, sizeof(*ref));
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
ref->hash.key = base->hash.key;
|
||||||
|
ref->obj = base;
|
||||||
|
ref->tfile = tfile;
|
||||||
|
ref->ref_type = ref_type;
|
||||||
|
kref_init(&ref->kref);
|
||||||
|
|
||||||
|
write_lock(&tfile->lock);
|
||||||
|
ret = drm_ht_insert_item(ht, &ref->hash);
|
||||||
|
|
||||||
|
if (likely(ret == 0)) {
|
||||||
|
list_add_tail(&ref->head, &tfile->ref_list);
|
||||||
|
kref_get(&base->refcount);
|
||||||
|
write_unlock(&tfile->lock);
|
||||||
|
if (existed != NULL)
|
||||||
|
*existed = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
write_unlock(&tfile->lock);
|
||||||
|
BUG_ON(ret != -EINVAL);
|
||||||
|
|
||||||
|
ttm_mem_global_free(mem_glob, sizeof(*ref));
|
||||||
|
kfree(ref);
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_ref_object_add);
|
||||||
|
|
||||||
|
static void ttm_ref_object_release(struct kref *kref)
|
||||||
|
{
|
||||||
|
struct ttm_ref_object *ref =
|
||||||
|
container_of(kref, struct ttm_ref_object, kref);
|
||||||
|
struct ttm_base_object *base = ref->obj;
|
||||||
|
struct ttm_object_file *tfile = ref->tfile;
|
||||||
|
struct drm_open_hash *ht;
|
||||||
|
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
|
||||||
|
|
||||||
|
ht = &tfile->ref_hash[ref->ref_type];
|
||||||
|
(void)drm_ht_remove_item(ht, &ref->hash);
|
||||||
|
list_del(&ref->head);
|
||||||
|
write_unlock(&tfile->lock);
|
||||||
|
|
||||||
|
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
|
||||||
|
base->ref_obj_release(base, ref->ref_type);
|
||||||
|
|
||||||
|
ttm_base_object_unref(&ref->obj);
|
||||||
|
ttm_mem_global_free(mem_glob, sizeof(*ref));
|
||||||
|
kfree(ref);
|
||||||
|
write_lock(&tfile->lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
|
||||||
|
unsigned long key, enum ttm_ref_type ref_type)
|
||||||
|
{
|
||||||
|
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
|
||||||
|
struct ttm_ref_object *ref;
|
||||||
|
struct drm_hash_item *hash;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
write_lock(&tfile->lock);
|
||||||
|
ret = drm_ht_find_item(ht, key, &hash);
|
||||||
|
if (unlikely(ret != 0)) {
|
||||||
|
write_unlock(&tfile->lock);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
|
||||||
|
kref_put(&ref->kref, ttm_ref_object_release);
|
||||||
|
write_unlock(&tfile->lock);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_ref_object_base_unref);
|
||||||
|
|
||||||
|
void ttm_object_file_release(struct ttm_object_file **p_tfile)
|
||||||
|
{
|
||||||
|
struct ttm_ref_object *ref;
|
||||||
|
struct list_head *list;
|
||||||
|
unsigned int i;
|
||||||
|
struct ttm_object_file *tfile = *p_tfile;
|
||||||
|
|
||||||
|
*p_tfile = NULL;
|
||||||
|
write_lock(&tfile->lock);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Since we release the lock within the loop, we have to
|
||||||
|
* restart it from the beginning each time.
|
||||||
|
*/
|
||||||
|
|
||||||
|
while (!list_empty(&tfile->ref_list)) {
|
||||||
|
list = tfile->ref_list.next;
|
||||||
|
ref = list_entry(list, struct ttm_ref_object, head);
|
||||||
|
ttm_ref_object_release(&ref->kref);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < TTM_REF_NUM; ++i)
|
||||||
|
drm_ht_remove(&tfile->ref_hash[i]);
|
||||||
|
|
||||||
|
write_unlock(&tfile->lock);
|
||||||
|
ttm_object_file_unref(&tfile);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_object_file_release);
|
||||||
|
|
||||||
|
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
|
||||||
|
unsigned int hash_order)
|
||||||
|
{
|
||||||
|
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
|
||||||
|
unsigned int i;
|
||||||
|
unsigned int j = 0;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (unlikely(tfile == NULL))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
rwlock_init(&tfile->lock);
|
||||||
|
tfile->tdev = tdev;
|
||||||
|
kref_init(&tfile->refcount);
|
||||||
|
INIT_LIST_HEAD(&tfile->ref_list);
|
||||||
|
|
||||||
|
for (i = 0; i < TTM_REF_NUM; ++i) {
|
||||||
|
ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
|
||||||
|
if (ret) {
|
||||||
|
j = i;
|
||||||
|
goto out_err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return tfile;
|
||||||
|
out_err:
|
||||||
|
for (i = 0; i < j; ++i)
|
||||||
|
drm_ht_remove(&tfile->ref_hash[i]);
|
||||||
|
|
||||||
|
kfree(tfile);
|
||||||
|
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_object_file_init);
|
||||||
|
|
||||||
|
struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
|
||||||
|
*mem_glob,
|
||||||
|
unsigned int hash_order)
|
||||||
|
{
|
||||||
|
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (unlikely(tdev == NULL))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
tdev->mem_glob = mem_glob;
|
||||||
|
rwlock_init(&tdev->object_lock);
|
||||||
|
atomic_set(&tdev->object_count, 0);
|
||||||
|
ret = drm_ht_create(&tdev->object_hash, hash_order);
|
||||||
|
|
||||||
|
if (likely(ret == 0))
|
||||||
|
return tdev;
|
||||||
|
|
||||||
|
kfree(tdev);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_object_device_init);
|
||||||
|
|
||||||
|
void ttm_object_device_release(struct ttm_object_device **p_tdev)
|
||||||
|
{
|
||||||
|
struct ttm_object_device *tdev = *p_tdev;
|
||||||
|
|
||||||
|
*p_tdev = NULL;
|
||||||
|
|
||||||
|
write_lock(&tdev->object_lock);
|
||||||
|
drm_ht_remove(&tdev->object_hash);
|
||||||
|
write_unlock(&tdev->object_lock);
|
||||||
|
|
||||||
|
kfree(tdev);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_object_device_release);
|
@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
|
|||||||
ttm->state = tt_unbound;
|
ttm->state = tt_unbound;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(ttm_tt_populate);
|
||||||
|
|
||||||
#ifdef CONFIG_X86
|
#ifdef CONFIG_X86
|
||||||
static inline int ttm_tt_set_page_caching(struct page *p,
|
static inline int ttm_tt_set_page_caching(struct page *p,
|
||||||
|
@ -36,17 +36,27 @@
|
|||||||
#ifndef _DRM_H_
|
#ifndef _DRM_H_
|
||||||
#define _DRM_H_
|
#define _DRM_H_
|
||||||
|
|
||||||
#include <linux/types.h>
|
#if defined(__linux__)
|
||||||
#include <asm/ioctl.h> /* For _IO* macros */
|
|
||||||
#define DRM_IOCTL_NR(n) _IOC_NR(n)
|
|
||||||
#define DRM_IOC_VOID _IOC_NONE
|
|
||||||
#define DRM_IOC_READ _IOC_READ
|
|
||||||
#define DRM_IOC_WRITE _IOC_WRITE
|
|
||||||
#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
|
|
||||||
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
|
|
||||||
|
|
||||||
#define DRM_MAJOR 226
|
#include <linux/types.h>
|
||||||
#define DRM_MAX_MINOR 15
|
#include <asm/ioctl.h>
|
||||||
|
typedef unsigned int drm_handle_t;
|
||||||
|
|
||||||
|
#else /* One of the BSDs */
|
||||||
|
|
||||||
|
#include <sys/ioccom.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
typedef int8_t __s8;
|
||||||
|
typedef uint8_t __u8;
|
||||||
|
typedef int16_t __s16;
|
||||||
|
typedef uint16_t __u16;
|
||||||
|
typedef int32_t __s32;
|
||||||
|
typedef uint32_t __u32;
|
||||||
|
typedef int64_t __s64;
|
||||||
|
typedef uint64_t __u64;
|
||||||
|
typedef unsigned long drm_handle_t;
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
|
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
|
||||||
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
|
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
|
||||||
@ -59,7 +69,6 @@
|
|||||||
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
|
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
|
||||||
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
|
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
|
||||||
|
|
||||||
typedef unsigned int drm_handle_t;
|
|
||||||
typedef unsigned int drm_context_t;
|
typedef unsigned int drm_context_t;
|
||||||
typedef unsigned int drm_drawable_t;
|
typedef unsigned int drm_drawable_t;
|
||||||
typedef unsigned int drm_magic_t;
|
typedef unsigned int drm_magic_t;
|
||||||
@ -454,6 +463,7 @@ struct drm_irq_busid {
|
|||||||
enum drm_vblank_seq_type {
|
enum drm_vblank_seq_type {
|
||||||
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
|
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
|
||||||
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
|
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
|
||||||
|
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
|
||||||
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
|
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
|
||||||
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
|
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
|
||||||
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
|
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
|
||||||
@ -461,8 +471,8 @@ enum drm_vblank_seq_type {
|
|||||||
};
|
};
|
||||||
|
|
||||||
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
|
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
|
||||||
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
|
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
|
||||||
_DRM_VBLANK_NEXTONMISS)
|
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
|
||||||
|
|
||||||
struct drm_wait_vblank_request {
|
struct drm_wait_vblank_request {
|
||||||
enum drm_vblank_seq_type type;
|
enum drm_vblank_seq_type type;
|
||||||
@ -686,6 +696,8 @@ struct drm_gem_open {
|
|||||||
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
|
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
|
||||||
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
|
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
|
||||||
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
|
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
|
||||||
|
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
|
||||||
|
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Device specific ioctls should only be in their respective headers
|
* Device specific ioctls should only be in their respective headers
|
||||||
@ -698,6 +710,34 @@ struct drm_gem_open {
|
|||||||
#define DRM_COMMAND_BASE 0x40
|
#define DRM_COMMAND_BASE 0x40
|
||||||
#define DRM_COMMAND_END 0xA0
|
#define DRM_COMMAND_END 0xA0
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Header for events written back to userspace on the drm fd. The
|
||||||
|
* type defines the type of event, the length specifies the total
|
||||||
|
* length of the event (including the header), and user_data is
|
||||||
|
* typically a 64 bit value passed with the ioctl that triggered the
|
||||||
|
* event. A read on the drm fd will always only return complete
|
||||||
|
* events, that is, if for example the read buffer is 100 bytes, and
|
||||||
|
* there are two 64 byte events pending, only one will be returned.
|
||||||
|
*
|
||||||
|
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
|
||||||
|
* up are chipset specific.
|
||||||
|
*/
|
||||||
|
struct drm_event {
|
||||||
|
__u32 type;
|
||||||
|
__u32 length;
|
||||||
|
};
|
||||||
|
|
||||||
|
#define DRM_EVENT_VBLANK 0x01
|
||||||
|
|
||||||
|
struct drm_event_vblank {
|
||||||
|
struct drm_event base;
|
||||||
|
__u64 user_data;
|
||||||
|
__u32 tv_sec;
|
||||||
|
__u32 tv_usec;
|
||||||
|
__u32 sequence;
|
||||||
|
__u32 reserved;
|
||||||
|
};
|
||||||
|
|
||||||
/* typedef area */
|
/* typedef area */
|
||||||
#ifndef __KERNEL__
|
#ifndef __KERNEL__
|
||||||
typedef struct drm_clip_rect drm_clip_rect_t;
|
typedef struct drm_clip_rect drm_clip_rect_t;
|
||||||
|
@ -245,16 +245,6 @@ extern void drm_ut_debug_printk(unsigned int request_level,
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define DRM_PROC_LIMIT (PAGE_SIZE-80)
|
|
||||||
|
|
||||||
#define DRM_PROC_PRINT(fmt, arg...) \
|
|
||||||
len += sprintf(&buf[len], fmt , ##arg); \
|
|
||||||
if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
|
|
||||||
|
|
||||||
#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
|
|
||||||
len += sprintf(&buf[len], fmt , ##arg); \
|
|
||||||
if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
|
|
||||||
|
|
||||||
/*@}*/
|
/*@}*/
|
||||||
|
|
||||||
/***********************************************************************/
|
/***********************************************************************/
|
||||||
@ -265,19 +255,8 @@ extern void drm_ut_debug_printk(unsigned int request_level,
|
|||||||
|
|
||||||
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
|
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
|
||||||
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
|
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
|
||||||
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
|
|
||||||
|
|
||||||
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
|
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
|
||||||
/**
|
|
||||||
* Get the private SAREA mapping.
|
|
||||||
*
|
|
||||||
* \param _dev DRM device.
|
|
||||||
* \param _ctx context number.
|
|
||||||
* \param _map output mapping.
|
|
||||||
*/
|
|
||||||
#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
|
|
||||||
(_map) = (_dev)->context_sareas[_ctx]; \
|
|
||||||
} while(0)
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Test that the hardware lock is held by the caller, returning otherwise.
|
* Test that the hardware lock is held by the caller, returning otherwise.
|
||||||
@ -296,18 +275,6 @@ do { \
|
|||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
/**
|
|
||||||
* Copy and IOCTL return string to user space
|
|
||||||
*/
|
|
||||||
#define DRM_COPY( name, value ) \
|
|
||||||
len = strlen( value ); \
|
|
||||||
if ( len > name##_len ) len = name##_len; \
|
|
||||||
name##_len = strlen( value ); \
|
|
||||||
if ( len && name ) { \
|
|
||||||
if ( copy_to_user( name, value, len ) ) \
|
|
||||||
return -EFAULT; \
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Ioctl function type.
|
* Ioctl function type.
|
||||||
*
|
*
|
||||||
@ -322,6 +289,9 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
|
|||||||
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
|
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
|
||||||
unsigned long arg);
|
unsigned long arg);
|
||||||
|
|
||||||
|
#define DRM_IOCTL_NR(n) _IOC_NR(n)
|
||||||
|
#define DRM_MAJOR 226
|
||||||
|
|
||||||
#define DRM_AUTH 0x1
|
#define DRM_AUTH 0x1
|
||||||
#define DRM_MASTER 0x2
|
#define DRM_MASTER 0x2
|
||||||
#define DRM_ROOT_ONLY 0x4
|
#define DRM_ROOT_ONLY 0x4
|
||||||
@ -426,6 +396,14 @@ struct drm_buf_entry {
|
|||||||
struct drm_freelist freelist;
|
struct drm_freelist freelist;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* Event queued up for userspace to read */
|
||||||
|
struct drm_pending_event {
|
||||||
|
struct drm_event *event;
|
||||||
|
struct list_head link;
|
||||||
|
struct drm_file *file_priv;
|
||||||
|
void (*destroy)(struct drm_pending_event *event);
|
||||||
|
};
|
||||||
|
|
||||||
/** File private data */
|
/** File private data */
|
||||||
struct drm_file {
|
struct drm_file {
|
||||||
int authenticated;
|
int authenticated;
|
||||||
@ -449,6 +427,10 @@ struct drm_file {
|
|||||||
struct drm_master *master; /* master this node is currently associated with
|
struct drm_master *master; /* master this node is currently associated with
|
||||||
N.B. not always minor->master */
|
N.B. not always minor->master */
|
||||||
struct list_head fbs;
|
struct list_head fbs;
|
||||||
|
|
||||||
|
wait_queue_head_t event_wait;
|
||||||
|
struct list_head event_list;
|
||||||
|
int event_space;
|
||||||
};
|
};
|
||||||
|
|
||||||
/** Wait queue */
|
/** Wait queue */
|
||||||
@ -795,6 +777,15 @@ struct drm_driver {
|
|||||||
/* Master routines */
|
/* Master routines */
|
||||||
int (*master_create)(struct drm_device *dev, struct drm_master *master);
|
int (*master_create)(struct drm_device *dev, struct drm_master *master);
|
||||||
void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
|
void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
|
||||||
|
/**
|
||||||
|
* master_set is called whenever the minor master is set.
|
||||||
|
* master_drop is called whenever the minor master is dropped.
|
||||||
|
*/
|
||||||
|
|
||||||
|
int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
|
||||||
|
bool from_open);
|
||||||
|
void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
|
||||||
|
bool from_release);
|
||||||
|
|
||||||
int (*proc_init)(struct drm_minor *minor);
|
int (*proc_init)(struct drm_minor *minor);
|
||||||
void (*proc_cleanup)(struct drm_minor *minor);
|
void (*proc_cleanup)(struct drm_minor *minor);
|
||||||
@ -900,6 +891,12 @@ struct drm_minor {
|
|||||||
struct drm_mode_group mode_group;
|
struct drm_mode_group mode_group;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct drm_pending_vblank_event {
|
||||||
|
struct drm_pending_event base;
|
||||||
|
int pipe;
|
||||||
|
struct drm_event_vblank event;
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* DRM device structure. This structure represent a complete card that
|
* DRM device structure. This structure represent a complete card that
|
||||||
* may contain multiple heads.
|
* may contain multiple heads.
|
||||||
@ -999,6 +996,12 @@ struct drm_device {
|
|||||||
|
|
||||||
u32 max_vblank_count; /**< size of vblank counter register */
|
u32 max_vblank_count; /**< size of vblank counter register */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* List of events
|
||||||
|
*/
|
||||||
|
struct list_head vblank_event_list;
|
||||||
|
spinlock_t event_lock;
|
||||||
|
|
||||||
/*@} */
|
/*@} */
|
||||||
cycles_t ctx_start;
|
cycles_t ctx_start;
|
||||||
cycles_t lck_start;
|
cycles_t lck_start;
|
||||||
@ -1135,6 +1138,8 @@ extern int drm_lastclose(struct drm_device *dev);
|
|||||||
extern int drm_open(struct inode *inode, struct file *filp);
|
extern int drm_open(struct inode *inode, struct file *filp);
|
||||||
extern int drm_stub_open(struct inode *inode, struct file *filp);
|
extern int drm_stub_open(struct inode *inode, struct file *filp);
|
||||||
extern int drm_fasync(int fd, struct file *filp, int on);
|
extern int drm_fasync(int fd, struct file *filp, int on);
|
||||||
|
extern ssize_t drm_read(struct file *filp, char __user *buffer,
|
||||||
|
size_t count, loff_t *offset);
|
||||||
extern int drm_release(struct inode *inode, struct file *filp);
|
extern int drm_release(struct inode *inode, struct file *filp);
|
||||||
|
|
||||||
/* Mapping support (drm_vm.h) */
|
/* Mapping support (drm_vm.h) */
|
||||||
|
@ -123,7 +123,7 @@ struct drm_display_mode {
|
|||||||
int type;
|
int type;
|
||||||
|
|
||||||
/* Proposed mode values */
|
/* Proposed mode values */
|
||||||
int clock;
|
int clock; /* in kHz */
|
||||||
int hdisplay;
|
int hdisplay;
|
||||||
int hsync_start;
|
int hsync_start;
|
||||||
int hsync_end;
|
int hsync_end;
|
||||||
@ -164,8 +164,8 @@ struct drm_display_mode {
|
|||||||
int *private;
|
int *private;
|
||||||
int private_flags;
|
int private_flags;
|
||||||
|
|
||||||
int vrefresh;
|
int vrefresh; /* in Hz */
|
||||||
float hsync;
|
int hsync; /* in kHz */
|
||||||
};
|
};
|
||||||
|
|
||||||
enum drm_connector_status {
|
enum drm_connector_status {
|
||||||
@ -242,6 +242,21 @@ struct drm_framebuffer_funcs {
|
|||||||
int (*create_handle)(struct drm_framebuffer *fb,
|
int (*create_handle)(struct drm_framebuffer *fb,
|
||||||
struct drm_file *file_priv,
|
struct drm_file *file_priv,
|
||||||
unsigned int *handle);
|
unsigned int *handle);
|
||||||
|
/**
|
||||||
|
* Optinal callback for the dirty fb ioctl.
|
||||||
|
*
|
||||||
|
* Userspace can notify the driver via this callback
|
||||||
|
* that a area of the framebuffer has changed and should
|
||||||
|
* be flushed to the display hardware.
|
||||||
|
*
|
||||||
|
* See documentation in drm_mode.h for the struct
|
||||||
|
* drm_mode_fb_dirty_cmd for more information as all
|
||||||
|
* the semantics and arguments have a one to one mapping
|
||||||
|
* on this function.
|
||||||
|
*/
|
||||||
|
int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
|
||||||
|
unsigned color, struct drm_clip_rect *clips,
|
||||||
|
unsigned num_clips);
|
||||||
};
|
};
|
||||||
|
|
||||||
struct drm_framebuffer {
|
struct drm_framebuffer {
|
||||||
@ -256,7 +271,7 @@ struct drm_framebuffer {
|
|||||||
unsigned int depth;
|
unsigned int depth;
|
||||||
int bits_per_pixel;
|
int bits_per_pixel;
|
||||||
int flags;
|
int flags;
|
||||||
void *fbdev;
|
struct fb_info *fbdev;
|
||||||
u32 pseudo_palette[17];
|
u32 pseudo_palette[17];
|
||||||
struct list_head filp_head;
|
struct list_head filp_head;
|
||||||
/* if you are using the helper */
|
/* if you are using the helper */
|
||||||
@ -290,6 +305,7 @@ struct drm_property {
|
|||||||
struct drm_crtc;
|
struct drm_crtc;
|
||||||
struct drm_connector;
|
struct drm_connector;
|
||||||
struct drm_encoder;
|
struct drm_encoder;
|
||||||
|
struct drm_pending_vblank_event;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* drm_crtc_funcs - control CRTCs for a given device
|
* drm_crtc_funcs - control CRTCs for a given device
|
||||||
@ -333,6 +349,19 @@ struct drm_crtc_funcs {
|
|||||||
void (*destroy)(struct drm_crtc *crtc);
|
void (*destroy)(struct drm_crtc *crtc);
|
||||||
|
|
||||||
int (*set_config)(struct drm_mode_set *set);
|
int (*set_config)(struct drm_mode_set *set);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Flip to the given framebuffer. This implements the page
|
||||||
|
* flip ioctl descibed in drm_mode.h, specifically, the
|
||||||
|
* implementation must return immediately and block all
|
||||||
|
* rendering to the current fb until the flip has completed.
|
||||||
|
* If userspace set the event flag in the ioctl, the event
|
||||||
|
* argument will point to an event to send back when the flip
|
||||||
|
* completes, otherwise it will be NULL.
|
||||||
|
*/
|
||||||
|
int (*page_flip)(struct drm_crtc *crtc,
|
||||||
|
struct drm_framebuffer *fb,
|
||||||
|
struct drm_pending_vblank_event *event);
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -596,6 +625,7 @@ struct drm_mode_config {
|
|||||||
/* Optional properties */
|
/* Optional properties */
|
||||||
struct drm_property *scaling_mode_property;
|
struct drm_property *scaling_mode_property;
|
||||||
struct drm_property *dithering_mode_property;
|
struct drm_property *dithering_mode_property;
|
||||||
|
struct drm_property *dirty_info_property;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
|
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
|
||||||
@ -667,6 +697,7 @@ extern void drm_mode_validate_size(struct drm_device *dev,
|
|||||||
extern void drm_mode_prune_invalid(struct drm_device *dev,
|
extern void drm_mode_prune_invalid(struct drm_device *dev,
|
||||||
struct list_head *mode_list, bool verbose);
|
struct list_head *mode_list, bool verbose);
|
||||||
extern void drm_mode_sort(struct list_head *mode_list);
|
extern void drm_mode_sort(struct list_head *mode_list);
|
||||||
|
extern int drm_mode_hsync(struct drm_display_mode *mode);
|
||||||
extern int drm_mode_vrefresh(struct drm_display_mode *mode);
|
extern int drm_mode_vrefresh(struct drm_display_mode *mode);
|
||||||
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
|
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
|
||||||
int adjust_flags);
|
int adjust_flags);
|
||||||
@ -703,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
|
|||||||
char *formats[]);
|
char *formats[]);
|
||||||
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
|
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
|
||||||
extern int drm_mode_create_dithering_property(struct drm_device *dev);
|
extern int drm_mode_create_dithering_property(struct drm_device *dev);
|
||||||
|
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
|
||||||
extern char *drm_get_encoder_name(struct drm_encoder *encoder);
|
extern char *drm_get_encoder_name(struct drm_encoder *encoder);
|
||||||
|
|
||||||
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
|
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
|
||||||
@ -730,6 +762,8 @@ extern int drm_mode_rmfb(struct drm_device *dev,
|
|||||||
void *data, struct drm_file *file_priv);
|
void *data, struct drm_file *file_priv);
|
||||||
extern int drm_mode_getfb(struct drm_device *dev,
|
extern int drm_mode_getfb(struct drm_device *dev,
|
||||||
void *data, struct drm_file *file_priv);
|
void *data, struct drm_file *file_priv);
|
||||||
|
extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
|
||||||
|
void *data, struct drm_file *file_priv);
|
||||||
extern int drm_mode_addmode_ioctl(struct drm_device *dev,
|
extern int drm_mode_addmode_ioctl(struct drm_device *dev,
|
||||||
void *data, struct drm_file *file_priv);
|
void *data, struct drm_file *file_priv);
|
||||||
extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
|
extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
|
||||||
@ -756,6 +790,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
|
|||||||
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
|
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
|
||||||
void *data, struct drm_file *file_priv);
|
void *data, struct drm_file *file_priv);
|
||||||
extern bool drm_detect_hdmi_monitor(struct edid *edid);
|
extern bool drm_detect_hdmi_monitor(struct edid *edid);
|
||||||
|
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
|
||||||
|
void *data, struct drm_file *file_priv);
|
||||||
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
|
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
|
||||||
int hdisplay, int vdisplay, int vrefresh,
|
int hdisplay, int vdisplay, int vrefresh,
|
||||||
bool reduced, bool interlaced, bool margins);
|
bool reduced, bool interlaced, bool margins);
|
||||||
|
@ -20,8 +20,8 @@
|
|||||||
* OF THIS SOFTWARE.
|
* OF THIS SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef _INTEL_DP_H_
|
#ifndef _DRM_DP_HELPER_H_
|
||||||
#define _INTEL_DP_H_
|
#define _DRM_DP_HELPER_H_
|
||||||
|
|
||||||
/* From the VESA DisplayPort spec */
|
/* From the VESA DisplayPort spec */
|
||||||
|
|
||||||
@ -130,15 +130,20 @@
|
|||||||
#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
|
#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
|
||||||
#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
|
#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
|
||||||
|
|
||||||
|
#define MODE_I2C_START 1
|
||||||
|
#define MODE_I2C_WRITE 2
|
||||||
|
#define MODE_I2C_READ 4
|
||||||
|
#define MODE_I2C_STOP 8
|
||||||
|
|
||||||
struct i2c_algo_dp_aux_data {
|
struct i2c_algo_dp_aux_data {
|
||||||
bool running;
|
bool running;
|
||||||
u16 address;
|
u16 address;
|
||||||
int (*aux_ch) (struct i2c_adapter *adapter,
|
int (*aux_ch) (struct i2c_adapter *adapter,
|
||||||
uint8_t *send, int send_bytes,
|
int mode, uint8_t write_byte,
|
||||||
uint8_t *recv, int recv_bytes);
|
uint8_t *read_byte);
|
||||||
};
|
};
|
||||||
|
|
||||||
int
|
int
|
||||||
i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
|
i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
|
||||||
|
|
||||||
#endif /* _INTEL_DP_H_ */
|
#endif /* _DRM_DP_HELPER_H_ */
|
@ -106,6 +106,10 @@ struct detailed_data_color_point {
|
|||||||
u8 wpindex2[3];
|
u8 wpindex2[3];
|
||||||
} __attribute__((packed));
|
} __attribute__((packed));
|
||||||
|
|
||||||
|
struct cvt_timing {
|
||||||
|
u8 code[3];
|
||||||
|
} __attribute__((packed));
|
||||||
|
|
||||||
struct detailed_non_pixel {
|
struct detailed_non_pixel {
|
||||||
u8 pad1;
|
u8 pad1;
|
||||||
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
|
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
|
||||||
@ -117,9 +121,13 @@ struct detailed_non_pixel {
|
|||||||
struct detailed_data_monitor_range range;
|
struct detailed_data_monitor_range range;
|
||||||
struct detailed_data_wpindex color;
|
struct detailed_data_wpindex color;
|
||||||
struct std_timing timings[5];
|
struct std_timing timings[5];
|
||||||
|
struct cvt_timing cvt[4];
|
||||||
} data;
|
} data;
|
||||||
} __attribute__((packed));
|
} __attribute__((packed));
|
||||||
|
|
||||||
|
#define EDID_DETAIL_EST_TIMINGS 0xf7
|
||||||
|
#define EDID_DETAIL_CVT_3BYTE 0xf8
|
||||||
|
#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
|
||||||
#define EDID_DETAIL_STD_MODES 0xfa
|
#define EDID_DETAIL_STD_MODES 0xfa
|
||||||
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
|
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
|
||||||
#define EDID_DETAIL_MONITOR_NAME 0xfc
|
#define EDID_DETAIL_MONITOR_NAME 0xfc
|
||||||
|
@ -27,9 +27,6 @@
|
|||||||
#ifndef _DRM_MODE_H
|
#ifndef _DRM_MODE_H
|
||||||
#define _DRM_MODE_H
|
#define _DRM_MODE_H
|
||||||
|
|
||||||
#include <linux/kernel.h>
|
|
||||||
#include <linux/types.h>
|
|
||||||
|
|
||||||
#define DRM_DISPLAY_INFO_LEN 32
|
#define DRM_DISPLAY_INFO_LEN 32
|
||||||
#define DRM_CONNECTOR_NAME_LEN 32
|
#define DRM_CONNECTOR_NAME_LEN 32
|
||||||
#define DRM_DISPLAY_MODE_LEN 32
|
#define DRM_DISPLAY_MODE_LEN 32
|
||||||
@ -78,6 +75,11 @@
 #define DRM_MODE_DITHERING_OFF	0
 #define DRM_MODE_DITHERING_ON	1
 
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF      0
+#define DRM_MODE_DIRTY_ON       1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
 struct drm_mode_modeinfo {
 	__u32 clock;
 	__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
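Editor's note: the DRM_MODE_DIRTY_* values above are the enum values of the new connector "dirty" property. A hedged sketch of a driver advertising it, assuming the drm_mode_create_dirty_info_property() and drm_connector_attach_property() helpers from drm_crtc.h of this era; my_attach_dirty_prop() and the initial value choice are hypothetical.

static void my_attach_dirty_prop(struct drm_device *dev,
				 struct drm_connector *connector)
{
	/* creates the property once per device, then attaches it */
	drm_mode_create_dirty_info_property(dev);
	drm_connector_attach_property(connector,
				      dev->mode_config.dirty_info_property,
				      DRM_MODE_DIRTY_ON);
}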
@ -225,6 +227,45 @@ struct drm_mode_fb_cmd {
 	__u32 handle;
 };
 
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS         0x03
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * as a hardware or software draw to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more then just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates, the annotates are a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions are half of num_clips given,
+ * where the clip rects are paired in src and dst. The width and
+ * height of each one of the pairs must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
+ * promises that the region specified of the clip rects is filled
+ * completely with a single color as given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+	__u32 fb_id;
+	__u32 flags;
+	__u32 color;
+	__u32 num_clips;
+	__u64 clips_ptr;
+};
+
 struct drm_mode_mode_cmd {
 	__u32 connector_id;
 	struct drm_mode_modeinfo mode;
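Editor's note: the comment above describes the new dirty-fb interface from the userspace side. A hedged sketch of flushing one damaged rectangle, assuming the DRM_IOCTL_MODE_DIRTYFB request number added alongside this structure; flush_rect() is hypothetical and the header paths may differ between installations.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Flush one damaged rectangle of framebuffer fb_id out to the hardware. */
static int flush_rect(int drm_fd, uint32_t fb_id,
		      uint16_t x, uint16_t y, uint16_t w, uint16_t h)
{
	struct drm_clip_rect clip = {
		.x1 = x, .y1 = y,
		.x2 = (uint16_t)(x + w), .y2 = (uint16_t)(y + h),
	};
	struct drm_mode_fb_dirty_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.fb_id = fb_id;
	cmd.flags = 0;		/* plain dirty, no copy/fill annotation */
	cmd.num_clips = 1;
	cmd.clips_ptr = (uint64_t)(uintptr_t)&clip;

	return ioctl(drm_fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}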
@ -268,4 +309,37 @@ struct drm_mode_crtc_lut {
 	__u64 blue;
 };
 
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc.  Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh.  The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens.  If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
+ * request that drm sends back a vblank event (see drm.h: struct
+ * drm_event_vblank) when the page flip is done.  The user_data field
+ * passed in with this ioctl will be returned as the user_data field
+ * in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+	__u32 crtc_id;
+	__u32 fb_id;
+	__u32 flags;
+	__u32 reserved;
+	__u64 user_data;
+};
+
 #endif
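Editor's note: the page-flip comment above implies a simple userspace pattern: issue the ioctl with DRM_MODE_PAGE_FLIP_EVENT set, then read the completion event back from the DRM fd. A hedged sketch, assuming the DRM_IOCTL_MODE_PAGE_FLIP request and the drm_event / drm_event_vblank definitions from drm.h of this series; flip_and_wait() is hypothetical.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Schedule a flip of crtc_id to fb_id, then block until the completion
 * event comes back on the DRM fd. */
static int flip_and_wait(int drm_fd, uint32_t crtc_id, uint32_t fb_id)
{
	struct drm_mode_crtc_page_flip flip;
	char buf[1024];
	ssize_t len;
	char *p;

	memset(&flip, 0, sizeof(flip));
	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT;	/* ask for a vblank event */
	flip.reserved = 0;			/* must be zero */
	flip.user_data = fb_id;			/* echoed back in the event */

	if (ioctl(drm_fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip))
		return -1;

	/* The fd delivers a stream of struct drm_event records; a
	 * DRM_EVENT_FLIP_COMPLETE record is a struct drm_event_vblank. */
	for (;;) {
		len = read(drm_fd, buf, sizeof(buf));
		if (len <= 0)
			return -1;
		p = buf;
		while (p < buf + len) {
			struct drm_event *e = (struct drm_event *)p;

			if (e->type == DRM_EVENT_FLIP_COMPLETE)
				return 0;
			p += e->length;
		}
	}
}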
@ -27,11 +27,11 @@
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
 
+#include "drm.h"
+
 /* Please note that modifications to all structs defined here are
  * subject to backwards-compatibility constraints.
  */
-#include <linux/types.h>
-#include "drm.h"
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
@ -35,7 +35,7 @@
 #ifndef __MGA_DRM_H__
 #define __MGA_DRM_H__
 
-#include <linux/types.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the Xserver file (mga_sarea.h)
@ -33,7 +33,7 @@
 #ifndef __RADEON_DRM_H__
 #define __RADEON_DRM_H__
 
-#include <linux/types.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (radeon_sarea.h)
@ -544,6 +544,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
  */
 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
+/**
+ * ttm_tt_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_tt_populate(struct ttm_tt *ttm);
+
 /**
  * ttm_ttm_destroy:
  *
include/drm/ttm/ttm_execbuf_util.h (new file, 107 lines)
@ -0,0 +1,107 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_EXECBUF_UTIL_H_
#define _TTM_EXECBUF_UTIL_H_

#include "ttm/ttm_bo_api.h"
#include <linux/list.h>

/**
 * struct ttm_validate_buffer
 *
 * @head: list head for thread-private list.
 * @bo: refcounted buffer object pointer.
 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
 * adding a new sync object.
 * @reservied: Indicates whether @bo has been reserved for validation.
 */

struct ttm_validate_buffer {
	struct list_head head;
	struct ttm_buffer_object *bo;
	void *new_sync_obj_arg;
	bool reserved;
};

/**
 * function ttm_eu_backoff_reservation
 *
 * @list: thread private list of ttm_validate_buffer structs.
 *
 * Undoes all buffer validation reservations for bos pointed to by
 * the list entries.
 */

extern void ttm_eu_backoff_reservation(struct list_head *list);

/**
 * function ttm_eu_reserve_buffers
 *
 * @list: thread private list of ttm_validate_buffer structs.
 * @val_seq: A unique sequence number.
 *
 * Tries to reserve bos pointed to by the list entries for validation.
 * If the function returns 0, all buffers are marked as "unfenced",
 * taken off the lru lists and are not synced for write CPU usage.
 *
 * If the function detects a deadlock due to multiple threads trying to
 * reserve the same buffers in reverse order, all threads except one will
 * back off and retry. This function may sleep while waiting for
 * CPU write reservations to be cleared, and for other threads to
 * unreserve their buffers.
 *
 * This function may return -ERESTART or -EAGAIN if the calling process
 * receives a signal while waiting. In that case, no buffers on the list
 * will be reserved upon return.
 *
 * Buffers reserved by this function should be unreserved by
 * a call to either ttm_eu_backoff_reservation() or
 * ttm_eu_fence_buffer_objects() when command submission is complete or
 * has failed.
 */

extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);

/**
 * function ttm_eu_fence_buffer_objects.
 *
 * @list: thread private list of ttm_validate_buffer structs.
 * @sync_obj: The new sync object for the buffers.
 *
 * This function should be called when command submission is complete, and
 * it will add a new sync object to bos pointed to by entries on @list.
 * It also unreserves all buffers, putting them on lru lists.
 *
 */

extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);

#endif
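Editor's note: the comments in the new header spell out a reserve, validate, submit, then fence (or back off) sequence for a driver's command-submission path. A hedged sketch of that call order; my_execbuf(), my_submit() and the per-buffer validation step are placeholders, only the ttm_eu_* calls come from the header.

static int my_submit(void **fence);	/* driver-specific submission, placeholder */

/* val_list holds struct ttm_validate_buffer entries built by the driver. */
static int my_execbuf(struct list_head *val_list, uint32_t val_seq)
{
	struct ttm_validate_buffer *entry;
	void *fence = NULL;
	int ret;

	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (unlikely(ret != 0))
		return ret;			/* nothing left reserved */

	list_for_each_entry(entry, val_list, head) {
		/* driver specific: validate entry->bo into its placement,
		 * fill in entry->new_sync_obj_arg as needed */
	}

	ret = my_submit(&fence);
	if (ret) {
		ttm_eu_backoff_reservation(val_list);	/* undo reservations */
		return ret;
	}

	ttm_eu_fence_buffer_objects(val_list, fence);	/* fence + unreserve */
	return 0;
}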
include/drm/ttm/ttm_lock.h (new file, 247 lines)
@ -0,0 +1,247 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

/** @file ttm_lock.h
 * This file implements a simple replacement for the buffer manager use
 * of the DRM heavyweight hardware lock.
 * The lock is a read-write lock. Taking it in read mode and write mode
 * is relatively fast, and intended for in-kernel use only.
 *
 * The vt mode is used only when there is a need to block all
 * user-space processes from validating buffers.
 * It's allowed to leave kernel space with the vt lock held.
 * If a user-space process dies while having the vt-lock,
 * it will be released during the file descriptor release. The vt lock
 * excludes write lock and read lock.
 *
 * The suspend mode is used to lock out all TTM users when preparing for
 * and executing suspend operations.
 *
 */

#ifndef _TTM_LOCK_H_
#define _TTM_LOCK_H_

#include "ttm/ttm_object.h"
#include <linux/wait.h>
#include <asm/atomic.h>

/**
 * struct ttm_lock
 *
 * @base: ttm base object used solely to release the lock if the client
 * holding the lock dies.
 * @queue: Queue for processes waiting for lock change-of-status.
 * @lock: Spinlock protecting some lock members.
 * @rw: Read-write lock counter. Protected by @lock.
 * @flags: Lock state. Protected by @lock.
 * @kill_takers: Boolean whether to kill takers of the lock.
 * @signal: Signal to send when kill_takers is true.
 */

struct ttm_lock {
	struct ttm_base_object base;
	wait_queue_head_t queue;
	spinlock_t lock;
	int32_t rw;
	uint32_t flags;
	bool kill_takers;
	int signal;
	struct ttm_object_file *vt_holder;
};


/**
 * ttm_lock_init
 *
 * @lock: Pointer to a struct ttm_lock
 * Initializes the lock.
 */
extern void ttm_lock_init(struct ttm_lock *lock);

/**
 * ttm_read_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a read lock.
 */
extern void ttm_read_unlock(struct ttm_lock *lock);

/**
 * ttm_read_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Takes the lock in read mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);

/**
 * ttm_read_trylock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Tries to take the lock in read mode. If the lock is already held
 * in write mode, the function will return -EBUSY. If the lock is held
 * in vt or suspend mode, the function will sleep until these modes
 * are unlocked.
 *
 * Returns:
 * -EBUSY The lock was already held in write mode.
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);

/**
 * ttm_write_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a write lock.
 */
extern void ttm_write_unlock(struct ttm_lock *lock);

/**
 * ttm_write_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Takes the lock in write mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);

/**
 * ttm_lock_downgrade
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Downgrades a write lock to a read lock.
 */
extern void ttm_lock_downgrade(struct ttm_lock *lock);

/**
 * ttm_suspend_lock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Takes the lock in suspend mode. Excludes read and write mode.
 */
extern void ttm_suspend_lock(struct ttm_lock *lock);

/**
 * ttm_suspend_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a suspend lock
 */
extern void ttm_suspend_unlock(struct ttm_lock *lock);

/**
 * ttm_vt_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 * @tfile: Pointer to a struct ttm_object_file to register the lock with.
 *
 * Takes the lock in vt mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 * -ENOMEM: Out of memory when locking.
 */
extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
		       struct ttm_object_file *tfile);

/**
 * ttm_vt_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a vt lock.
 * Returns:
 * -EINVAL If the lock was not held.
 */
extern int ttm_vt_unlock(struct ttm_lock *lock);

/**
 * ttm_write_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a write lock.
 */
extern void ttm_write_unlock(struct ttm_lock *lock);

/**
 * ttm_write_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Takes the lock in write mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);

/**
 * ttm_lock_set_kill
 *
 * @lock: Pointer to a struct ttm_lock
 * @val: Boolean whether to kill processes taking the lock.
 * @signal: Signal to send to the process taking the lock.
 *
 * The kill-when-taking-lock functionality is used to kill processes that keep
 * on using the TTM functionality when its resources has been taken down, for
 * example when the X server exits. A typical sequence would look like this:
 * - X server takes lock in write mode.
 * - ttm_lock_set_kill() is called with @val set to true.
 * - As part of X server exit, TTM resources are taken down.
 * - X server releases the lock on file release.
 * - Another dri client wants to render, takes the lock and is killed.
 *
 */
static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
				     int signal)
{
	lock->kill_takers = val;
	if (val)
		lock->signal = signal;
}

#endif
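Editor's note: the header above describes read, write, vt and suspend modes; the common driver-side use is to bracket buffer validation in an ioctl with the read lock. A hedged sketch of that pattern; struct my_priv, my_do_validate() and the lock's placement in the device private are hypothetical.

struct my_priv {
	struct ttm_lock ttm_lock;
	/* other driver state ... */
};

static int my_do_validate(struct my_priv *priv, void *data);	/* placeholder */

static int my_validate_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct my_priv *priv = dev->dev_private;
	int ret;

	ret = ttm_read_lock(&priv->ttm_lock, true);	/* interruptible */
	if (unlikely(ret != 0))
		return ret;				/* e.g. -ERESTARTSYS */

	ret = my_do_validate(priv, data);

	ttm_read_unlock(&priv->ttm_lock);
	return ret;
}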
@ -33,6 +33,7 @@
 #include <linux/wait.h>
 #include <linux/errno.h>
 #include <linux/kobject.h>
+#include <linux/mm.h>
 
 /**
  * struct ttm_mem_shrink - callback to shrink TTM memory usage.
include/drm/ttm/ttm_object.h (new file, 267 lines)
@ -0,0 +1,267 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/** @file ttm_object.h
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

#ifndef _TTM_OBJECT_H_
#define _TTM_OBJECT_H_

#include <linux/list.h>
#include "drm_hashtab.h"
#include <linux/kref.h>
#include <ttm/ttm_memory.h>

/**
 * enum ttm_ref_type
 *
 * Describes what type of reference a ref object holds.
 *
 * TTM_REF_USAGE is a simple refcount on a base object.
 *
 * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
 * buffer object.
 *
 * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
 * buffer object.
 *
 */

enum ttm_ref_type {
	TTM_REF_USAGE,
	TTM_REF_SYNCCPU_READ,
	TTM_REF_SYNCCPU_WRITE,
	TTM_REF_NUM
};

/**
 * enum ttm_object_type
 *
 * One entry per ttm object type.
 * Device-specific types should use the
 * ttm_driver_typex types.
 */

enum ttm_object_type {
	ttm_fence_type,
	ttm_buffer_type,
	ttm_lock_type,
	ttm_driver_type0 = 256,
	ttm_driver_type1
};

struct ttm_object_file;
struct ttm_object_device;

/**
 * struct ttm_base_object
 *
 * @hash: hash entry for the per-device object hash.
 * @type: derived type this object is base class for.
 * @shareable: Other ttm_object_files can access this object.
 *
 * @tfile: Pointer to ttm_object_file of the creator.
 * NULL if the object was not created by a user request.
 * (kernel object).
 *
 * @refcount: Number of references to this object, not
 * including the hash entry. A reference to a base object can
 * only be held by a ref object.
 *
 * @refcount_release: A function to be called when there are
 * no more references to this object. This function should
 * destroy the object (or make sure destruction eventually happens),
 * and when it is called, the object has
 * already been taken out of the per-device hash. The parameter
 * "base" should be set to NULL by the function.
 *
 * @ref_obj_release: A function to be called when a reference object
 * with another ttm_ref_type than TTM_REF_USAGE is deleted.
 * this function may, for example, release a lock held by a user-space
 * process.
 *
 * This struct is intended to be used as a base struct for objects that
 * are visible to user-space. It provides a global name, race-safe
 * access and refcounting, minimal access contol and hooks for unref actions.
 */

struct ttm_base_object {
	struct drm_hash_item hash;
	enum ttm_object_type object_type;
	bool shareable;
	struct ttm_object_file *tfile;
	struct kref refcount;
	void (*refcount_release) (struct ttm_base_object **base);
	void (*ref_obj_release) (struct ttm_base_object *base,
				 enum ttm_ref_type ref_type);
};

/**
 * ttm_base_object_init
 *
 * @tfile: Pointer to a struct ttm_object_file.
 * @base: The struct ttm_base_object to initialize.
 * @shareable: This object is shareable with other applcations.
 * (different @tfile pointers.)
 * @type: The object type.
 * @refcount_release: See the struct ttm_base_object description.
 * @ref_obj_release: See the struct ttm_base_object description.
 *
 * Initializes a struct ttm_base_object.
 */

extern int ttm_base_object_init(struct ttm_object_file *tfile,
				struct ttm_base_object *base,
				bool shareable,
				enum ttm_object_type type,
				void (*refcount_release) (struct ttm_base_object
							  **),
				void (*ref_obj_release) (struct ttm_base_object
							 *,
							 enum ttm_ref_type
							 ref_type));

/**
 * ttm_base_object_lookup
 *
 * @tfile: Pointer to a struct ttm_object_file.
 * @key: Hash key
 *
 * Looks up a struct ttm_base_object with the key @key.
 * Also verifies that the object is visible to the application, by
 * comparing the @tfile argument and checking the object shareable flag.
 */

extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
						       *tfile, uint32_t key);

/**
 * ttm_base_object_unref
 *
 * @p_base: Pointer to a pointer referncing a struct ttm_base_object.
 *
 * Decrements the base object refcount and clears the pointer pointed to by
 * p_base.
 */

extern void ttm_base_object_unref(struct ttm_base_object **p_base);

/**
 * ttm_ref_object_add.
 *
 * @tfile: A struct ttm_object_file representing the application owning the
 * ref_object.
 * @base: The base object to reference.
 * @ref_type: The type of reference.
 * @existed: Upon completion, indicates that an identical reference object
 * already existed, and the refcount was upped on that object instead.
 *
 * Adding a ref object to a base object is basically like referencing the
 * base object, but a user-space application holds the reference. When the
 * file corresponding to @tfile is closed, all its reference objects are
 * deleted. A reference object can have different types depending on what
 * it's intended for. It can be refcounting to prevent object destruction,
 * When user-space takes a lock, it can add a ref object to that lock to
 * make sure the lock is released if the application dies. A ref object
 * will hold a single reference on a base object.
 */
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
			      struct ttm_base_object *base,
			      enum ttm_ref_type ref_type, bool *existed);
/**
 * ttm_ref_object_base_unref
 *
 * @key: Key representing the base object.
 * @ref_type: Ref type of the ref object to be dereferenced.
 *
 * Unreference a ref object with type @ref_type
 * on the base object identified by @key. If there are no duplicate
 * references, the ref object will be destroyed and the base object
 * will be unreferenced.
 */
extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
				     unsigned long key,
				     enum ttm_ref_type ref_type);

/**
 * ttm_object_file_init - initialize a struct ttm_object file
 *
 * @tdev: A struct ttm_object device this file is initialized on.
 * @hash_order: Order of the hash table used to hold the reference objects.
 *
 * This is typically called by the file_ops::open function.
 */

extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
						    *tdev,
						    unsigned int hash_order);

/**
 * ttm_object_file_release - release data held by a ttm_object_file
 *
 * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
 * *p_tfile will be set to NULL by this function.
 *
 * Releases all data associated by a ttm_object_file.
 * Typically called from file_ops::release. The caller must
 * ensure that there are no concurrent users of tfile.
 */

extern void ttm_object_file_release(struct ttm_object_file **p_tfile);

/**
 * ttm_object device init - initialize a struct ttm_object_device
 *
 * @hash_order: Order of hash table used to hash the base objects.
 *
 * This function is typically called on device initialization to prepare
 * data structures needed for ttm base and ref objects.
 */

extern struct ttm_object_device *ttm_object_device_init
    (struct ttm_mem_global *mem_glob, unsigned int hash_order);

/**
 * ttm_object_device_release - release data held by a ttm_object_device
 *
 * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
 * *p_tdev will be set to NULL by this function.
 *
 * Releases all data associated by a ttm_object_device.
 * Typically called from driver::unload before the destruction of the
 * device private data structure.
 */

extern void ttm_object_device_release(struct ttm_object_device **p_tdev);

#endif
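Editor's note: the new header provides base objects that drivers embed in their own user-visible objects. A hedged sketch of that pattern; struct my_obj, my_obj_release() and my_obj_new() are hypothetical, only ttm_base_object_init() and the release-callback contract come from the header.

struct my_obj {
	struct ttm_base_object base;
	/* driver payload ... */
};

static void my_obj_release(struct ttm_base_object **p_base)
{
	struct my_obj *obj = container_of(*p_base, struct my_obj, base);

	*p_base = NULL;		/* the interface asks the callback to clear it */
	kfree(obj);
}

static int my_obj_new(struct ttm_object_file *tfile, bool shareable,
		      struct my_obj **p_obj)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int ret;

	if (!obj)
		return -ENOMEM;

	ret = ttm_base_object_init(tfile, &obj->base, shareable,
				   ttm_driver_type0, &my_obj_release, NULL);
	if (unlikely(ret != 0)) {
		kfree(obj);
		return ret;
	}

	/* obj->base.hash.key is the handle user space would pass to
	 * ttm_base_object_lookup() / ttm_ref_object_add() */
	*p_obj = obj;
	return 0;
}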
@ -24,7 +24,7 @@
 #ifndef _VIA_DRM_H_
 #define _VIA_DRM_H_
 
-#include <linux/types.h>
+#include "drm.h"
 
 /* WARNING: These defines must be the same as what the Xserver uses.
  * if you change them, you must change the defines in the Xserver.