Mirror of https://github.com/torvalds/linux.git
dma-mapping fixes for Linux 6.3
Merge tag 'dma-mapping-6.3-2023-03-31' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix for swiotlb deadlock due to wrong alignment checks (GuoRui.Yu, Petr Tesarik)

* tag 'dma-mapping-6.3-2023-03-31' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix slot alignment checks
  swiotlb: use wrap_area_index() instead of open-coding it
  swiotlb: fix the deadlock in swiotlb_do_find_slots
commit 62bad54b26
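The deadlock fix ("swiotlb: fix the deadlock in swiotlb_do_find_slots") addresses the search loop in kernel/dma/swiotlb.c shown in the diff below: the old loop exited only when the index wrapped around to exactly the slot it started from, but because the index advances by 1 on an alignment mismatch and by `stride` otherwise, it can step over that starting slot and keep spinning with the area lock held. The following is a minimal user-space sketch of the two loop shapes, not the kernel code: the area size, stride, start slot and the "every even slot is misaligned, nothing is free" pattern are invented demo values, and wrap_index() merely stands in for wrap_area_index().

#include <stdbool.h>
#include <stdio.h>

#define NSLABS 8U	/* invented demo area size */

/* Stand-in for wrap_area_index(): wrap back to slot 0 at the end of the area. */
static unsigned int wrap_index(unsigned int index)
{
	return index < NSLABS ? index : 0;
}

/* Invented demo pattern: every even slot fails the alignment check ... */
static bool misaligned(unsigned int index)
{
	return (index & 1) == 0;
}

/* ... and no slot is ever free, so the search must eventually give up. */
static bool usable(unsigned int index)
{
	(void)index;
	return false;
}

/* Pre-fix loop shape: stop only when the index lands exactly on 'wrap' again. */
static int old_search(unsigned int start, unsigned int stride)
{
	unsigned int index, wrap, steps = 0;

	index = wrap = start;
	do {
		if (++steps > 10 * NSLABS)
			return -2;			/* demo cap: the real loop spins forever */
		if (misaligned(index)) {
			index = wrap_index(index + 1);	/* advance by 1 ... */
			continue;
		}
		if (usable(index))
			return (int)index;
		index = wrap_index(index + stride);	/* ... or by stride, so 'wrap' can be skipped */
	} while (index != wrap);

	return -1;
}

/* Fixed loop shape: give up once every slot in the area has been considered. */
static int new_search(unsigned int start, unsigned int stride)
{
	unsigned int index = start, slots_checked;

	for (slots_checked = 0; slots_checked < NSLABS; ) {
		if (misaligned(index)) {
			index = wrap_index(index + 1);
			slots_checked++;
			continue;
		}
		if (usable(index))
			return (int)index;
		index = wrap_index(index + stride);
		slots_checked += stride;
	}

	return -1;
}

int main(void)
{
	printf("old loop: %d (hit the demo cap, i.e. it would never terminate)\n",
	       old_search(4, 2));
	printf("new loop: %d (no fit found, but it fails cleanly)\n",
	       new_search(4, 2));
	return 0;
}

With a start slot of 4, a stride of 2 and only odd slots passing the alignment check, the old loop cycles through {0, 1, 3, 5, 7} and never lands on 4 again, while the new loop stops after checking NSLABS slots.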
@@ -625,8 +625,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
 	unsigned int iotlb_align_mask =
 		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
 	unsigned int nslots = nr_slots(alloc_size), stride;
-	unsigned int index, wrap, count = 0, i;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+	unsigned int index, slots_checked, count = 0, i;
 	unsigned long flags;
 	unsigned int slot_base;
 	unsigned int slot_index;
@@ -634,30 +634,35 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
 	BUG_ON(!nslots);
 	BUG_ON(area_index >= mem->nareas);
 
+	/*
+	 * For allocations of PAGE_SIZE or larger only look for page aligned
+	 * allocations.
+	 */
+	if (alloc_size >= PAGE_SIZE)
+		iotlb_align_mask &= PAGE_MASK;
+	iotlb_align_mask &= alloc_align_mask;
+
 	/*
 	 * For mappings with an alignment requirement don't bother looping to
-	 * unaligned slots once we found an aligned one. For allocations of
-	 * PAGE_SIZE or larger only look for page aligned allocations.
+	 * unaligned slots once we found an aligned one.
 	 */
 	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
-	if (alloc_size >= PAGE_SIZE)
-		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
-	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
 
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > mem->area_nslabs - area->used))
 		goto not_found;
 
 	slot_base = area_index * mem->area_nslabs;
-	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));
+	index = area->index;
 
-	do {
+	for (slots_checked = 0; slots_checked < mem->area_nslabs; ) {
 		slot_index = slot_base + index;
 
 		if (orig_addr &&
 		    (slot_addr(tbl_dma_addr, slot_index) &
 		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
 			index = wrap_area_index(mem, index + 1);
+			slots_checked++;
 			continue;
 		}
 
@@ -673,7 +678,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
 				goto found;
 		}
 		index = wrap_area_index(mem, index + stride);
-	} while (index != wrap);
+		slots_checked += stride;
+	}
 
 not_found:
 	spin_unlock_irqrestore(&area->lock, flags);
@@ -693,10 +699,7 @@ found:
 	/*
 	 * Update the indices to avoid searching in the next round.
 	 */
-	if (index + nslots < mem->area_nslabs)
-		area->index = index + nslots;
-	else
-		area->index = 0;
+	area->index = wrap_area_index(mem, index + nslots);
 	area->used += nslots;
 	spin_unlock_irqrestore(&area->lock, flags);
 	return slot_index;
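For the wrap_area_index() clean-up in the last hunk, the helper call is meant to behave exactly like the open-coded if/else it replaces. Below is a small sketch that checks the two forms agree for every start index; wrap_index() here is written to match the removed branch rather than copied from the kernel, and AREA_NSLABS/nslots are arbitrary demo values.

#include <assert.h>
#include <stdio.h>

#define AREA_NSLABS 64U	/* arbitrary demo area size */

/* Written to match the open-coded branch removed in the hunk above. */
static unsigned int wrap_index(unsigned int index)
{
	return index < AREA_NSLABS ? index : 0;
}

int main(void)
{
	unsigned int index, nslots = 4;	/* arbitrary demo allocation size */

	for (index = 0; index < AREA_NSLABS; index++) {
		unsigned int open_coded;

		/* Removed form: explicit test and reset. */
		if (index + nslots < AREA_NSLABS)
			open_coded = index + nslots;
		else
			open_coded = 0;

		/* New form: one helper call. */
		assert(open_coded == wrap_index(index + nslots));
	}

	printf("open-coded and helper wrap agree for all %u start indices\n",
	       AREA_NSLABS);
	return 0;
}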