/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */
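
/*
 * Illustration (not part of the driver logic): if the guest's PFN 0x100
 * happens to be backed by MFN 0x4321, a device has to be programmed with
 * the machine address (0x4321 << XEN_PAGE_SHIFT) | offset rather than
 * with (0x100 << XEN_PAGE_SHIFT) | offset. xen_phys_to_bus() below
 * performs exactly this substitution via pfn_to_bfn().
 */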

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#ifndef CONFIG_X86
static unsigned long dma_alloc_coherent_mask(struct device *dev,
					     gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}
#endif

#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32-bit when dma_addr_t is 64-bit, leading to a loss of
 * information if the shift is done before casting to 64-bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
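
/*
 * Worked example (illustrative values, assuming XEN_PAGE_SHIFT == 12 and
 * a pfn_to_bfn() mapping of 0x12 -> 0x99): xen_phys_to_bus(0x12345)
 * yields ((dma_addr_t)0x99 << 12) | 0x345 == 0x99345. The bfn replaces
 * the page frame while the offset within the page is preserved.
 */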

static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}
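
/*
 * Example (illustrative, assuming XEN_PAGE_SIZE == 4096): an offset of
 * 0x800 and a length of 0x1000 give nr_pages == 2, so the loop above
 * verifies that the bfn backing the second Xen page directly follows
 * the bfn backing the first one.
 */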

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
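
/*
 * Illustrative arithmetic, assuming 4 KiB pages: IO_TLB_SEGSIZE (128)
 * slabs of 1 << IO_TLB_SHIFT (2 KiB) each form a 256 KiB segment, so the
 * first exchange above is attempted with dma_bits = get_order(256 KiB) +
 * PAGE_SHIFT = 6 + 12 = 18 bits, and only widened towards max_dma_bits
 * when the hypervisor cannot satisfy the request.
 */
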
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}
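
/*
 * Example: with the 64MB default and IO_TLB_SHIFT == 11 (2 KiB slabs),
 * xen_io_tlb_nslabs becomes 32768 and the function returns 64MB worth
 * of bytes for the caller to allocate.
 */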

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		    "You either: don't have the permissions, do not have"\
		    " enough free memory under 4GB, or the hypervisor memory"\
		    " is too fragmented!";
	default:
		break;
	}
	return "";
}
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
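
/*
 * Typical call sites (a sketch, based on the architecture setup paths):
 * x86 PV invokes xen_swiotlb_init(1, true) from early boot, while ARM
 * guests call it with early == false once the page allocator is up.
 */
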
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
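
/*
 * Summary of the path above: memory first comes from the arch allocator;
 * only when the resulting machine range exceeds the device's coherent
 * mask or straddles a Xen page boundary is it exchanged for a contiguous
 * under-mask region via xen_create_contiguous_region().
 */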

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	if ((dev_addr + size - 1 <= dma_mask) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return XEN_SWIOTLB_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return XEN_SWIOTLB_ERROR_CODE;
}
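
/*
 * Of the two exits above, the fast path hands the machine address
 * straight to the device; the bounce path goes through the Xen-SWIOTLB
 * aperture and returns XEN_SWIOTLB_ERROR_CODE when the aperture is
 * exhausted or the bounce slot is still not addressable by the device.
 */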

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with a highmem page, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
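
/*
 * Usage sketch (illustrative): a driver that wants to inspect a
 * DMA_FROM_DEVICE buffer without unmapping it would do
 *
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	examine(buf);
 *	dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
 *
 * which reaches the two wrappers above through the ops table below;
 * examine() stands in for driver-specific processing.
 */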

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
						dev_addr,
						map & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
						dev_addr,
						paddr & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
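
/*
 * Usage sketch (illustrative): callers map the list and then walk it
 * with the generic accessors,
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	for_each_sg(sgl, sg, n, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *
 * where program_hw() stands in for the device-specific setup.
 */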

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
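
/*
 * Example: a device limited to 32-bit DMA passes mask 0xffffffff; the
 * check above succeeds only if the machine address of the last byte of
 * the bounce aperture lies below 4GB, which xen_swiotlb_fixup() works
 * to guarantee.
 */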

/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						    dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

/*
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only, and should not go to production build
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON(!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							   handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}

static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}
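
/*
 * Drivers observe mapping failures through the generic helper (sketch):
 *
 *	dma_addr_t h = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, h))
 *		return -ENOMEM;
 *
 * which lands in xen_swiotlb_mapping_error() via the ops table below.
 */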

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
	.mapping_error	= xen_swiotlb_mapping_error,
};