Commit ebb4949e authored by Linus Torvalds

Merge tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:

 - KVM PCIe/MSI passthrough support on ARM/ARM64

 - introduction of a core representation for individual hardware IOMMUs

 - support for IOMMU privileged mappings as supported by some ARM IOMMUs

 - 16-bit SID support for ARM-SMMUv2

 - stream table optimization for ARM-SMMUv3

 - various fixes and other small improvements

* tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (61 commits)
  vfio/type1: Fix error return code in vfio_iommu_type1_attach_group()
  iommu: Remove iommu_register_instance interface
  iommu/exynos: Make use of iommu_device_register interface
  iommu/mediatek: Make use of iommu_device_register interface
  iommu/msm: Make use of iommu_device_register interface
  iommu/arm-smmu: Make use of the iommu_register interface
  iommu: Add iommu_device_set_fwnode() interface
  iommu: Make iommu_device_link/unlink take a struct iommu_device
  iommu: Add sysfs bindings for struct iommu_device
  iommu: Introduce new 'struct iommu_device'
  iommu: Rename struct iommu_device
  iommu: Rename iommu_get_instance()
  iommu: Fix static checker warning in iommu_insert_device_resv_regions
  iommu: Avoid unnecessary assignment of dev->iommu_fwspec
  iommu/mediatek: Remove bogus 'select' statements
  iommu/dma: Remove bogus dma_supported() implementation
  iommu/ipmmu-vmsa: Restrict IOMMU Domain Geometry to 32-bit address space
  iommu/vt-d: Don't over-free page table directories
  iommu/vt-d: Tylersburg isoch identity map check is done too late.
  iommu/vt-d: Fix some macros that are incorrectly specified in intel-iommu
  ...
parents 937b5b5d 8d2932dd
......@@ -12,3 +12,15 @@ Description: /sys/kernel/iommu_groups/ contains a number of sub-
file if the IOMMU driver has chosen to register a more
common name for the group.
Users:
What: /sys/kernel/iommu_groups/reserved_regions
Date: January 2017
KernelVersion: v4.11
Contact: Eric Auger <eric.auger@redhat.com>
Description: /sys/kernel/iommu_groups/reserved_regions lists the
IOVA regions that are reserved. Not all reserved
regions are necessarily listed. This is typically used to
report direct-mapped, MSI and non-mappable regions. Each
region is described on a single line: the first field is
the base IOVA, the second is the end IOVA and the third
field describes the type of the region.
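An illustrative (made-up) read of the file might look as follows; the
addresses are platform specific and the type strings follow the region
types introduced in this series (e.g. "direct" for IOMMU_RESV_DIRECT,
"msi" for IOMMU_RESV_MSI):

        0x00000000fee00000 0x00000000feefffff msi
        0x000000009d000000 0x000000009dffffff direct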
......@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls
where allocation failures are not a problem, and shouldn't bother the logs.
NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
DMA_ATTR_PRIVILEGED
-------------------
Some advanced peripherals such as remote processors and GPUs perform
accesses to DMA buffers in both privileged "supervisor" and unprivileged
"user" modes. This attribute is used to indicate to the DMA-mapping
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
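A minimal usage sketch (dev, fw_size, fw_buf and fw_dma are placeholder
names for a driver that already holds the peripheral's struct device):
the buffer is requested through the attrs-aware allocator so the
attribute reaches the IOMMU mapping code.

        #include <linux/dma-mapping.h>

        dma_addr_t fw_dma;
        void *fw_buf;

        /* Buffer the device accesses in its privileged "supervisor" mode;
         * on IOMMU-backed mappings this results in an IOMMU_PRIV mapping. */
        fw_buf = dma_alloc_attrs(dev, fw_size, &fw_dma, GFP_KERNEL,
                                 DMA_ATTR_PRIVILEGED);
        if (!fw_buf)
                return -ENOMEM;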
......@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
int prot = 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
case DMA_TO_DEVICE:
return prot | IOMMU_READ;
case DMA_FROM_DEVICE:
return prot | IOMMU_WRITE;
default:
return prot;
}
}
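A couple of worked values, read straight off the switch above:

        __dma_info_to_prot(DMA_TO_DEVICE,     0)                   == IOMMU_READ
        __dma_info_to_prot(DMA_TO_DEVICE,     DMA_ATTR_PRIVILEGED) == IOMMU_PRIV | IOMMU_READ
        __dma_info_to_prot(DMA_BIDIRECTIONAL, 0)                   == IOMMU_READ | IOMMU_WRITE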
/* IOMMU */
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
......@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
* Create a mapping in device IO address space for specified pages
*/
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
......@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
len = (j - i) << PAGE_SHIFT;
ret = iommu_map(mapping->domain, iova, phys, len,
IOMMU_READ|IOMMU_WRITE);
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
if (ret < 0)
goto fail;
iova += len;
......@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t *handle, int coherent_flag)
dma_addr_t *handle, int coherent_flag,
unsigned long attrs)
{
struct page *page;
void *addr;
......@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
if (!addr)
return NULL;
*handle = __iommu_create_mapping(dev, &page, size);
*handle = __iommu_create_mapping(dev, &page, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_mapping;
......@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
return __iommu_alloc_simple(dev, size, gfp, handle,
coherent_flag);
coherent_flag, attrs);
/*
* Following is a work-around (a.k.a. hack) to prevent pages
......@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (!pages)
return NULL;
*handle = __iommu_create_mapping(dev, pages, size);
*handle = __iommu_create_mapping(dev, pages, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
......@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
int prot;
switch (dir) {
case DMA_BIDIRECTIONAL:
prot = IOMMU_READ | IOMMU_WRITE;
break;
case DMA_TO_DEVICE:
prot = IOMMU_READ;
break;
case DMA_FROM_DEVICE:
prot = IOMMU_WRITE;
break;
default:
prot = 0;
}
return prot;
}
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
......@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_direction_to_prot(dir);
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
......@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
prot = __dma_direction_to_prot(dir);
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)
......@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
if (ret < 0)
......
......@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
size_t iosize = size;
void *addr;
......@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int prot = dma_direction_to_prot(dir, coherent);
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!iommu_dma_mapping_error(dev, dev_addr) &&
......@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
return iommu_dma_map_sg(dev, sgl, nelems,
dma_direction_to_prot(dir, coherent));
dma_info_to_prot(dir, coherent, attrs));
}
static void __iommu_unmap_sg_attrs(struct device *dev,
......@@ -799,7 +799,6 @@ static struct dma_map_ops iommu_dma_ops = {
.sync_sg_for_device = __iommu_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.dma_supported = iommu_dma_supported,
.mapping_error = iommu_dma_mapping_error,
};
......
......@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
if (!iort_fwnode)
return NULL;
ops = iommu_get_instance(iort_fwnode);
ops = iommu_ops_from_fwnode(iort_fwnode);
if (!ops)
return NULL;
......
......@@ -1859,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
* Alloc MicroCode buffer for 'chans' Channel threads.
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
*/
pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
&pl330->mcode_bus, GFP_KERNEL);
&pl330->mcode_bus, GFP_KERNEL,
DMA_ATTR_PRIVILEGED);
if (!pl330->mcode_cpu) {
dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
__func__, __LINE__);
......
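Since the microcode buffer is now obtained with dma_alloc_attrs(), the
matching release must go through the attrs-aware counterpart as well; a
sketch of what the corresponding teardown in dmac_free_resources() would
look like (the free path is not shown in this hunk):

        dma_free_attrs(pl330->ddma.dev, chans * pl330->mcbufsz,
                       pl330->mcode_cpu, pl330->mcode_bus,
                       DMA_ATTR_PRIVILEGED);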
......@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
select IOMMU_API
select MEMORY
select MTK_SMI
select COMMON_CLK_MT2701_MMSYS
select COMMON_CLK_MT2701_IMGSYS
select COMMON_CLK_MT2701_VDECSYS
help
Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is the
Multimedia Memory Management Unit. This option enables remapping of
......
......@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
*/
static const struct iommu_ops amd_iommu_ops;
const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
......@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int devid;
if (dev->archdata.iommu)
......@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
if (devid < 0)
return devid;
iommu = amd_iommu_rlookup_table[devid];
dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
......@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
dev->archdata.iommu = dev_data;
iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
dev);
iommu_device_link(&iommu->iommu, dev);
return 0;
}
......@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
static void iommu_uninit_device(struct device *dev)
{
int devid;
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
iommu = amd_iommu_rlookup_table[devid];
dev_data = search_dev_data(devid);
if (!dev_data)
return;
......@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
dev);
iommu_device_unlink(&iommu->iommu, dev);
iommu_group_remove_device(dev);
......@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return false;
}
static void amd_iommu_get_dm_regions(struct device *dev,
struct list_head *head)
static void amd_iommu_get_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *region;
struct unity_map_entry *entry;
int devid;
......@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
struct iommu_dm_region *region;
size_t length;
int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
region = kzalloc(sizeof(*region), GFP_KERNEL);
length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
prot |= IOMMU_WRITE;
region = iommu_alloc_resv_region(entry->address_start,
length, prot,
IOMMU_RESV_DIRECT);
if (!region) {
pr_err("Out of memory allocating dm-regions for %s\n",
dev_name(dev));
return;
}
region->start = entry->address_start;
region->length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
region->prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
region->prot |= IOMMU_WRITE;
list_add_tail(&region->list, head);
}
region = iommu_alloc_resv_region(MSI_RANGE_START,
MSI_RANGE_END - MSI_RANGE_START + 1,
0, IOMMU_RESV_RESERVED);
if (!region)
return;
list_add_tail(&region->list, head);
region = iommu_alloc_resv_region(HT_RANGE_START,
HT_RANGE_END - HT_RANGE_START + 1,
0, IOMMU_RESV_RESERVED);
if (!region)
return;
list_add_tail(&region->list, head);
}
static void amd_iommu_put_dm_regions(struct device *dev,
static void amd_iommu_put_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_dm_region *entry, *next;
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
static void amd_iommu_apply_dm_region(struct device *dev,
static void amd_iommu_apply_resv_region(struct device *dev,
struct iommu_domain *domain,
struct iommu_dm_region *region)
struct iommu_resv_region *region)
{
struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
unsigned long start, end;
......@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}
static const struct iommu_ops amd_iommu_ops = {
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_free = amd_iommu_domain_free,
......@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
.get_dm_regions = amd_iommu_get_dm_regions,
.put_dm_regions = amd_iommu_put_dm_regions,
.apply_dm_region = amd_iommu_apply_dm_region,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_resv_regions = amd_iommu_put_resv_regions,
.apply_resv_region = amd_iommu_apply_resv_region,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
......
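With the driver converted from the old dm_region callbacks to
get_resv_regions/put_resv_regions, consumers such as VFIO or the new
reserved_regions sysfs file go through the IOMMU core rather than
driver-private lists. A minimal consumer-side sketch, assuming the
iommu_get_resv_regions()/iommu_put_resv_regions() helpers added by this
series:

        #include <linux/iommu.h>

        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list)
                dev_info(dev, "reserved region: start %pa, length %zu, type %d\n",
                         &region->start, region->length, region->type);
        iommu_put_resv_regions(dev, &resv_regions);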
......@@ -94,6 +94,8 @@
* out of it.
*/
extern const struct iommu_ops amd_iommu_ops;
/*
* structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys.
......@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
amd_iommu_groups, "ivhd%d",
iommu->index);
iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d", iommu->index);
iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
iommu_device_register(&iommu->iommu);
return pci_enable_device(iommu->dev);
}
......@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
*/
ret = check_ivrs_checksum(ivrs_base);
if (ret)
return ret;
goto out;
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
......
......@@ -535,8 +535,8 @@ struct amd_iommu {
/* if one, we need to send a completion wait command */
bool need_sync;
/* IOMMU sysfs device */
struct device *iommu_dev;
/* Handle for IOMMU core code */
struct iommu_device iommu;
/*
* We can't rely on the BIOS to restore all values on reinit, so we
......
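Replacing the bare 'struct device *iommu_dev' with an embedded struct
iommu_device is the common pattern for every converted driver: the handle
lives in the per-instance structure (as above) and is then announced to
the core instead of the old iommu_device_create()/iommu_register_instance()
calls. A condensed sketch of the sequence (my_iommu, my_iommu_ops and the
sysfs name are placeholders; error unwinding omitted):

        #include <linux/iommu.h>

        struct my_iommu {
                void __iomem *base;
                struct iommu_device iommu;      /* handle for the IOMMU core */
        };

        static int my_iommu_register(struct my_iommu *mmu, struct device *dev)
        {
                int ret;

                ret = iommu_device_sysfs_add(&mmu->iommu, dev, NULL,
                                             "my-iommu.%s", dev_name(dev));
                if (ret)
                        return ret;

                iommu_device_set_ops(&mmu->iommu, &my_iommu_ops);
                iommu_device_set_fwnode(&mmu->iommu, dev->fwnode);

                return iommu_device_register(&mmu->iommu);
        }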
......@@ -269,9 +269,6 @@
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44
#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
#define STRTAB_STE_1_PRIVCFG_SHIFT 48
#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
......@@ -412,6 +409,9 @@
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
......@@ -616,6 +616,9 @@ struct arm_smmu_device {
unsigned int sid_bits;
struct arm_smmu_strtab_cfg strtab_cfg;
/* IOMMU core code handle */
struct iommu_device iommu;
};
/* SMMU private data for each master */
......@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}
/* Nuke the existing Config, as we're going to rewrite it */
val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
if (ste->valid)
val |= STRTAB_STE_0_V;
else
val &= ~STRTAB_STE_0_V;
/* Nuke the existing STE_0 value, as we're going to rewrite it */
val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
......@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#ifdef CONFIG_PCI_ATS
STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
STRTAB_STE_1_PRIVCFG_UNPRIV <<
STRTAB_STE_1_PRIVCFG_SHIFT);
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
if (smmu->features & ARM_SMMU_FEAT_STALLS)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
......@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
}
if (ste->s2_cfg) {
......@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
......@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
if (!IS_ERR(group))
if (!IS_ERR(group)) {
iommu_group_put(group);
iommu_device_link(&smmu->iommu, dev);
}
return PTR_ERR_OR_ZERO(group);
}
......@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct arm_smmu_master_data *master;
struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
master = fwspec->iommu_priv;
smmu = master->smmu;
if (master && master->ste.valid)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
kfree(master);
iommu_fwspec_free(dev);
}
......@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
static void arm_smmu_get_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *region;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
prot, IOMMU_RESV_MSI);
if (!region)
return;
list_add_tail(&region->list, head);
}
static void arm_smmu_put_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
......@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
......@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
u32 size, l1size;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
/*
* If we can resolve everything with a single L2 table, then we
* just need a single L1 descriptor. Otherwise, calculate the L1
* size, capped to the SIDSIZE.
*/
if (smmu->sid_bits < STRTAB_SPLIT) {
size = 0;
} else {
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
size = min(size, smmu->sid_bits - STRTAB_SPLIT);
}
/* Calculate the L1 size, capped to the SIDSIZE. */
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
size = min(size, smmu->sid_bits - STRTAB_SPLIT);
cfg->num_l1_ents = 1 << size;
size += STRTAB_SPLIT;
......@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
/*
* If the SMMU supports fewer bits than would fill a single L2 stream
* table, use a linear table instead.
*/
if (smmu->sid_bits <= STRTAB_SPLIT)
smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
......@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
struct resource *res;
resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
bool bypass;
......@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
}
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
......@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return ret;
/* And we're up. Go go go! */
iommu_register_instance(dev->fwnode, &arm_smmu_ops);
ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
"smmu3.%pa", &ioaddr);
if (ret)
return ret;
iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
ret = iommu_device_register(&smmu->iommu);