1
0
Fork 0

misc: xilinx-sdfec: improve get_user_pages_fast() error handling

[ Upstream commit 57343d5161 ]

This fixes the case of get_user_pages_fast() returning a -errno.
The result needs to be stored in a signed integer. And for safe
signed/unsigned comparisons, it's best to keep everything signed.
And get_user_pages_fast() also expects a signed value for number
of pages to pin.

Therefore, change most relevant variables, from u32 to int. Leave
"n" unsigned, for convenience in checking for overflow. And provide
a WARN_ON_ONCE() and early return, if overflow occurs.

Also, as long as we're tidying up: rename the page array from page,
to pages, in order to match the conventions used in most other call
sites.

Fixes: 20ec628e80 ("misc: xilinx_sdfec: Add ability to configure LDPC")
Cc: Derek Kiernan <derek.kiernan@xilinx.com>
Cc: Dragan Cvetic <dragan.cvetic@xilinx.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Michal Simek <michal.simek@xilinx.com>
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Link: https://lore.kernel.org/r/20200527012628.1100649-2-jhubbard@nvidia.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
5.4-rM2-2.2.x-imx-squashed
John Hubbard 2020-05-26 18:26:26 -07:00 committed by Greg Kroah-Hartman
parent 24aa54a8b0
commit 3302d17ef6
1 changed file with 17 additions and 10 deletions

View File

@@ -602,10 +602,10 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
 			      const u32 depth)
 {
 	u32 reg = 0;
-	u32 res;
-	u32 n, i;
+	int res, i, nr_pages;
+	u32 n;
 	u32 *addr = NULL;
-	struct page *page[MAX_NUM_PAGES];
+	struct page *pages[MAX_NUM_PAGES];
 
 	/*
 	 * Writes that go beyond the length of
@@ -622,15 +622,22 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
 	if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
 		n += 1;
 
-	res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
-	if (res < n) {
-		for (i = 0; i < res; i++)
-			put_page(page[i]);
+	if (WARN_ON_ONCE(n > INT_MAX))
+		return -EINVAL;
+
+	nr_pages = n;
+
+	res = get_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
+	if (res < nr_pages) {
+		if (res > 0) {
+			for (i = 0; i < res; i++)
+				put_page(pages[i]);
+		}
 		return -EINVAL;
 	}
 
-	for (i = 0; i < n; i++) {
-		addr = kmap(page[i]);
+	for (i = 0; i < nr_pages; i++) {
+		addr = kmap(pages[i]);
 		do {
 			xsdfec_regwrite(xsdfec,
 					base_addr + ((offset + reg) *
@@ -639,7 +646,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
 			reg++;
 		} while ((reg < len) &&
 			 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
-		put_page(page[i]);
+		put_page(pages[i]);
 	}
 	return reg;
 }