Commit dfbbe89e authored by Linus Torvalds

Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (53 commits)
  drm: detect hdmi monitor by hdmi identifier (v3)
  drm: drm_fops.c unlock missing on error path
  drm: reorder struct drm_ioctl_desc to save space on 64 bit builds
  radeon: add some new pci ids
  drm: read EDID extensions from monitor
  drm: Use a little stash on the stack to avoid kmalloc in most DRM ioctls.
  drm/radeon: add regs required for occlusion queries support
  drm/i915: check the return value from the copy from user
  drm/radeon: fix logic in r600_page_table_init() to match ati_gart
  drm/radeon: r600 ptes are 64-bit, cleanup cleanup function.
  drm/radeon: don't call irq changes on r600 suspend/resume
  drm/radeon: fix r600 writeback across suspend/resume
  drm/radeon: fix r600 writeback setup.
  drm: fix warnings about new mappings in info code.
  drm/radeon: NULL noise: drivers/gpu/drm/radeon/radeon_*.c
  drm/radeon: fix r600 pci mapping calls.
  drm/radeon: r6xx/r7xx: fix possible oops in r600_page_table_cleanup()
  radeon: call the correct idle function, logic got inverted.
  drm/radeon: RS600: fix interrupt handling
  drm/r600: fix rptr address along lines of previous fixes to radeon.
  ...
parents 712b0006 f23c20c8
@@ -77,7 +77,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
if (!entry->busaddr[i])
break;
pci_unmap_page(dev->pdev, entry->busaddr[i],
PAGE_SIZE, PCI_DMA_TODEVICE);
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
@@ -95,13 +95,14 @@ EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
struct drm_local_map *map = &gart_info->mapping;
struct drm_sg_mem *entry = dev->sg;
void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base;
u32 *pci_gart = NULL, page_base, gart_idx;
dma_addr_t bus_address = 0;
int i, j, ret = 0;
int max_pages;
int max_ati_pages, max_real_pages;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
@@ -117,6 +118,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
goto done;
}
pci_gart = gart_info->table_handle->vaddr;
address = gart_info->table_handle->vaddr;
bus_address = gart_info->table_handle->busaddr;
} else {
@@ -127,18 +129,23 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
(unsigned long)address);
}
pci_gart = (u32 *) address;
max_pages = (gart_info->table_size / sizeof(u32));
pages = (entry->pages <= max_pages)
? entry->pages : max_pages;
max_ati_pages = (gart_info->table_size / sizeof(u32));
max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
pages = (entry->pages <= max_real_pages)
? entry->pages : max_real_pages;
memset(pci_gart, 0, max_pages * sizeof(u32));
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
memset(pci_gart, 0, max_ati_pages * sizeof(u32));
} else {
memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
}
gart_idx = 0;
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_TODEVICE);
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (entry->busaddr[i] == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
@@ -149,19 +156,26 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
u32 val;
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
*pci_gart = cpu_to_le32((page_base) | 0xc);
val = page_base | 0xc;
break;
case DRM_ATI_GART_PCIE:
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
val = (page_base >> 8) | 0xc;
break;
default:
case DRM_ATI_GART_PCI:
*pci_gart = cpu_to_le32(page_base);
val = page_base;
break;
}
pci_gart++;
if (gart_info->gart_table_location ==
DRM_ATI_GART_MAIN)
pci_gart[gart_idx] = cpu_to_le32(val);
else
DRM_WRITE32(map, gart_idx * sizeof(u32), val);
gart_idx++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
}
......
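The page-table loop above now goes through an index rather than a raw pointer so that the same code can serve a GART table kept in system memory (plain store with an explicit little-endian conversion) or one kept in the card's aperture (store through the I/O mapping). A minimal stand-alone sketch of that split; gart_set_pte() and its parameters are illustrative names, not DRM symbols:

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: write one 32-bit GART PTE either into a table in system RAM
 * or into a table behind an ioremap()ed VRAM window. */
static void gart_set_pte(u32 *ram_table, void __iomem *vram_table,
                         bool table_in_vram, u32 idx, u32 val)
{
        if (table_in_vram)
                writel(val, vram_table + idx * sizeof(u32)); /* MMIO store */
        else
                ram_table[idx] = cpu_to_le32(val); /* direct store, LE layout */
}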
@@ -34,15 +34,17 @@
*/
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"
unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
return pci_resource_len(dev->pdev, resource);
}
@@ -50,24 +52,44 @@ unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource
EXPORT_SYMBOL(drm_get_resource_len);
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
drm_local_map_t *map)
struct drm_local_map *map)
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
((entry->map->offset == map->offset) ||
((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
/*
* Because the kernel-userspace ABI is fixed at a 32-bit offset
* while PCI resources may live above that, we ignore the map
* offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
* It is assumed that each driver will have only one resource of
* each type.
*/
if (!entry->map ||
map->type != entry->map->type ||
entry->master != dev->primary->master)
continue;
switch (map->type) {
case _DRM_SHM:
if (map->flags != _DRM_CONTAINS_LOCK)
break;
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
return entry;
default: /* Make gcc happy */
;
}
if (entry->map->offset == map->offset)
return entry;
}
return NULL;
}
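The new comment in this function leans on the fact that the legacy drm_map ABI only carries a 32-bit offset, so two large BARs cannot be told apart by offset alone. A tiny illustration with made-up addresses, not code from the patch:

#include <linux/types.h>

/* Illustrative only: two BARs that differ above bit 31 produce the same
 * 32-bit token once truncated, which is why offset matching is skipped for
 * _DRM_FRAME_BUFFER and _DRM_REGISTERS maps. */
static bool tokens_would_collide(void)
{
        u64 bar_a = 0x200000000ULL;     /* hypothetical BAR at 8 GiB  */
        u64 bar_b = 0x300000000ULL;     /* hypothetical BAR at 12 GiB */

        return (u32)bar_a == (u32)bar_b;        /* true: both truncate to 0 */
}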
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
unsigned long user_token, int hashed_handle)
unsigned long user_token, int hashed_handle, int shm)
{
int use_hashed_handle;
int use_hashed_handle, shift;
unsigned long add;
#if (BITS_PER_LONG == 64)
use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
@@ -83,30 +105,47 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
if (ret != -EINVAL)
return ret;
}
shift = 0;
add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
if (shm && (SHMLBA > PAGE_SIZE)) {
int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
/* For shared memory, we have to preserve the SHMLBA
* bits of the eventual vma->vm_pgoff value during
* mmap(). Otherwise we run into cache aliasing problems
* on some platforms. On these platforms, the pgoff of
* a mmap() request is used to pick a suitable virtual
* address for the mmap() region such that it will not
* cause cache aliasing problems.
*
* Therefore, make sure the SHMLBA relevant bits of the
* hash value we use are equal to those in the original
* kernel virtual address.
*/
shift = bits;
add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
}
return drm_ht_just_insert_please(&dev->map_hash, hash,
user_token, 32 - PAGE_SHIFT - 3,
0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
shift, add);
}
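The handle produced here ends up as vm_pgoff when userspace mmap()s the map, so on architectures where SHMLBA is larger than a page its low page-number bits must match those of the kernel virtual address, as the comment above explains. A sketch of just that bit-folding step, with shm_handle_seed() as a hypothetical helper name:

#include <linux/log2.h>
#include <asm/page.h>
#include <asm/shmparam.h>

/* Sketch: fold the SHMLBA-relevant page-number bits of the kernel address
 * into the base handle value, mirroring what drm_map_handle() does for
 * _DRM_SHM maps. */
static unsigned long shm_handle_seed(unsigned long kvaddr, unsigned long base)
{
        if (SHMLBA > PAGE_SIZE) {
                int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

                base |= (kvaddr >> PAGE_SHIFT) & ((1UL << bits) - 1UL);
        }
        return base;
}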
/**
* Ioctl to specify a range of memory that is available for mapping by a non-root process.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_map structure.
* \return zero on success or a negative value on error.
* Core function to create a range of memory available for mapping by a
* non-root process.
*
* Adjusts the memory offset to its absolute value according to the mapping
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
* applicable and if supported by the kernel.
*/
static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags,
struct drm_map_list ** maplist)
{
struct drm_map *map;
struct drm_local_map *map;
struct drm_map_list *list;
drm_dma_handle_t *dmah;
unsigned long user_token;
@@ -129,9 +168,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
map->offset, map->size, map->type);
if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
(unsigned long long)map->offset, map->size, map->type);
if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
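Because resource_size_t is 32 bits on some configurations and 64 bits on others, the DRM_DEBUG call above casts it to unsigned long long and prints with %llx instead of relying on %lx. The same idiom in isolation, as a sketch with a made-up message:

#include <linux/kernel.h>
#include <linux/pci.h>

/* Sketch: portable printing of a resource_size_t value. */
static void print_bar0(struct pci_dev *pdev)
{
        resource_size_t start = pci_resource_start(pdev, 0);

        printk(KERN_DEBUG "BAR0 starts at 0x%08llx\n",
               (unsigned long long)start);
}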
@@ -259,7 +298,8 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EPERM;
}
DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
(unsigned long long)map->offset, map->size);
break;
case _DRM_GEM:
@@ -309,7 +349,8 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
/* We do it here so that dev->struct_mutex protects the increment */
user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
map->offset;
ret = drm_map_handle(dev, &list->hash, user_token, 0);
ret = drm_map_handle(dev, &list->hash, user_token, 0,
(map->type == _DRM_SHM));
if (ret) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
@@ -327,9 +368,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
return 0;
}
int drm_addmap(struct drm_device * dev, unsigned int offset,
int drm_addmap(struct drm_device * dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, drm_local_map_t ** map_ptr)
enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
struct drm_map_list *list;
int rc;
@@ -342,6 +383,17 @@ int drm_addmap(struct drm_device * dev, unsigned int offset,
EXPORT_SYMBOL(drm_addmap);
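With the signature change above, drivers can pass a BAR start straight through as a resource_size_t. A hypothetical driver-side use; the BAR index and flags here are illustrative and not taken from any particular in-tree driver:

#include "drmP.h"

/* Sketch: map a register BAR via drm_addmap() and hand back the resulting
 * struct drm_local_map pointer for later register access. */
static int example_map_registers(struct drm_device *dev,
                                 struct drm_local_map **mmio)
{
        return drm_addmap(dev,
                          drm_get_resource_start(dev, 2), /* BAR chosen for the example */
                          drm_get_resource_len(dev, 2),
                          _DRM_REGISTERS, _DRM_READ_ONLY, mmio);
}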
/**
* Ioctl to specify a range of memory that is available for mapping by a
* non-root process.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_map structure.
* \return zero on success or a negative value on error.
*
*/
int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -367,19 +419,13 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*
* Searches for the map on drm_device::maplist, removes it from the list, checks
* whether it is still in use, and frees any associated resources (such as
* MTRRs) if it is not.
*
* \sa drm_addmap
*/
int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
@@ -442,7 +488,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
int ret;
@@ -462,12 +508,18 @@ EXPORT_SYMBOL(drm_rmmap);
* One use case might be after addmap is allowed for normal users for SHM and
* gets used by drivers that the server doesn't need to care about. This seems
* unlikely.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*/
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *request = data;
drm_local_map_t *map = NULL;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list;
int ret;
@@ -1534,7 +1586,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
&& (dma->flags & _DRM_DMA_USE_SG))
|| (drm_core_check_feature(dev, DRIVER_FB_DMA)
&& (dma->flags & _DRM_DMA_USE_FB))) {
struct drm_map *map = dev->agp_buffer_map;
struct drm_local_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
if (!map) {
......
@@ -143,7 +143,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_map *map;
struct drm_local_map *map;
struct drm_map_list *_entry;
mutex_lock(&dev->struct_mutex);
@@ -186,7 +186,7 @@ int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_map *map = NULL;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
mutex_lock(&dev->struct_mutex);
......
@@ -254,15 +254,19 @@ int drm_lastclose(struct drm_device * dev)
int drm_init(struct drm_driver *driver)
{
struct pci_dev *pdev = NULL;
struct pci_device_id *pid;
const struct pci_device_id *pid;
int i;
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
if (driver->driver_features & DRIVER_MODESET)
return pci_register_driver(&driver->pci_driver);
/* If not using KMS, fall back to stealth mode manual scanning. */
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
pid = &driver->pci_driver.id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
@@ -287,68 +291,17 @@ int drm_init(struct drm_driver *driver)
EXPORT_SYMBOL(drm_init);
/**
* Called via cleanup_module() at module unload time.
*
* Cleans up all DRM devices, calling drm_lastclose().
*
* \sa drm_init
*/
static void drm_cleanup(struct drm_device * dev)
{
struct drm_map_list *r_list, *list_temp;
DRM_DEBUG("\n");
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
}
drm_vblank_cleanup(dev);
drm_lastclose(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
dev->agp && dev->agp->agp_mtrr >= 0) {
int retval;
retval = mtrr_del(dev->agp->agp_mtrr,
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size * 1024 * 1024);
DRM_DEBUG("mtrr_del=%d\n", retval);
}
if (dev->driver->unload)
dev->driver->unload(dev);
if (drm_core_has_AGP(dev) && dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
drm_put_minor(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
void drm_exit(struct drm_driver *driver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_cleanup(dev);
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(&driver->pci_driver);
} else {
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
}
DRM_INFO("Module unloaded\n");
}
@@ -468,6 +421,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
atomic_inc(&dev->ioctl_count);
@@ -506,10 +460,14 @@ int drm_ioctl(struct inode *inode, struct file *filp,
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
}
}
}
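This hunk is the "little stash on the stack" change from the shortlog: ioctl payloads of 128 bytes or less are staged in an on-stack buffer and only larger ones take the kmalloc() path, with the matching kfree() guarded by a pointer comparison. The pattern on its own, outside the DRM ioctl plumbing; the function and names are illustrative:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Sketch: stage 'len' bytes in an on-stack buffer when they fit, otherwise
 * fall back to the heap, and free only what was actually allocated. */
static int stage_with_stash(const void *src, size_t len)
{
        char stash[128];
        char *buf = stash;

        if (len > sizeof(stash)) {
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
        }

        memcpy(buf, src, len);
        /* ... work on buf here ... */

        if (buf != stash)
                kfree(buf);
        return 0;
}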
@@ -530,7 +488,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
}
err_i1:
if (kdata)
if (kdata != stack_kdata)
kfree(kdata);
atomic_dec(&dev->ioctl_count);
if (retcode)
@@ -540,7 +498,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
EXPORT_SYMBOL(drm_ioctl);
drm_local_map_t *drm_getsarea(struct drm_device *dev)
struct drm_local_map *drm_getsarea(struct drm_device *dev)
{
struct drm_map_list *entry;
......
@@ -550,11 +550,20 @@ static int add_detailed_info(struct drm_connector *connector,
}
#define DDC_ADDR 0x50
unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
/**
* Get EDID information via I2C.
*
* \param adapter : i2c device adaptor
* \param buf : EDID data buffer to be filled
* \param len : EDID data buffer length
* \return 0 on success or -1 on failure.
*
* Try to fetch EDID information by calling i2c driver function.
*/
int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
unsigned char *buf, int len)
{
unsigned char start = 0x0;
unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
@@ -564,31 +573,36 @@ unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = EDID_LENGTH,
.len = len,
.buf = buf,
}
};
if (!buf) {
dev_warn(&adapter->dev, "unable to allocate memory for EDID "
"block.\n");
return NULL;
}
if (i2c_transfer(adapter, msgs, 2) == 2)
return buf;
return 0;
dev_info(&adapter->dev, "unable to read EDID block.\n");
kfree(buf);
return NULL;
return -1;
}
EXPORT_SYMBOL(drm_do_probe_ddc_edid);
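The rework above turns the EDID fetch into a fill-a-caller-buffer operation, but the bus transaction itself is unchanged: write the block offset to the DDC address, then read the block back in a second message. A reduced sketch of that transaction; edid_read_block() is an illustrative name, and 0x50 is the standard DDC_ADDR used above:

#include <linux/i2c.h>
#include <linux/errno.h>

/* Sketch: one write message to set the EDID offset, one read message to
 * fetch 'len' bytes back from the monitor's DDC address. */
static int edid_read_block(struct i2c_adapter *adapter, unsigned char offset,
                           unsigned char *buf, int len)
{
        unsigned char start = offset;
        struct i2c_msg msgs[] = {
                { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &start },
                { .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf    },
        };

        return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -EIO;
}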
static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
/**
* Get EDID information.
*
* \param adapter : i2c device adaptor.
* \param buf : EDID data buffer to be filled
* \param len : EDID data buffer length
* \return 0 on success or -1 on failure.
*
* Initialize DDC, then fetch EDID information
* by calling drm_do_probe_ddc_edid function.
*/
static int drm_ddc_read(struct i2c_adapter *adapter,
unsigned char *buf, int len)
{
struct i2c_algo_bit_data *algo_data = adapter->algo_data;
unsigned char *edid = NULL;
int i, j;
int ret = -1;
algo_data->setscl(algo_data->data, 1);
@@ -616,7 +630,7 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
msleep(15);
/* Do the real work */
edid = drm_do_probe_ddc_edid(adapter);
ret = drm_do_probe_ddc_edid(adapter, buf, len);
algo_data->setsda(algo_data->data, 0);
algo_data->setscl(algo_data->data, 0);
msleep(15);
@@ -632,7 +646,7 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
msleep(15);
algo_data->setscl(algo_data->data, 0);
algo_data->setsda(algo_data->data, 0);
if (edid)
if (ret == 0)
break;
}
/* Release the DDC lines when done or the Apple Cinema HD display
@@ -641,9 +655,31 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
algo_data->setsda(algo_data->data, 1);
algo_data->setscl(algo_data->data, 1);
return edid;
return ret;
}
static int drm_ddc_read_edid(struct drm_connector *connector,
struct i2c_adapter *adapter,
char *buf, int len)
{
int ret;
ret = drm_ddc_read(adapter, buf, len);
if (ret != 0) {
dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
drm_get_connector_name(connector));
goto end;
}
if (!edid_is_valid((struct edid *)buf)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
ret = -1;
}
end:
return ret;
}
#define MAX_EDID_EXT_NUM 4
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
@@ -656,27 +692,118 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
int ret;
struct edid *edid;
edid = (struct edid *)drm_ddc_read(adapter);
if (!edid) {
dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
drm_get_connector_name(connector));
return NULL;
edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
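The buffer allocated here is sized for the 128-byte base block plus up to MAX_EDID_EXT_NUM extension blocks, which is what lets the "read EDID extensions from monitor" change in the shortlog pull CEA and other extension blocks in one pass. How many extensions actually follow is reported by byte 0x7e of the base block; a small sketch, using the constants from above, of turning that count into a total length with the same cap:

/* Sketch: byte 0x7e of the EDID base block counts the extension blocks that
 * follow; cap it at the MAX_EDID_EXT_NUM limit used above. */
static int edid_total_length(const unsigned char *base_block)
{
        int ext_blocks = base_block[0x7e];

        if (ext_blocks > MAX_EDID_EXT_NUM)
                ext_blocks = MAX_EDID_EXT_NUM;

        return EDID_LENGTH * (1 + ext_blocks);
}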