
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 - add the new bpf syscall to ARM.
 - drop a redundant return statement in __iommu_alloc_remap()
 - fix a performance issue noticed by Thomas Petazzoni with
   kmap_atomic().
 - fix an issue with the L2 cache OF parsing code which caused it to
   incorrectly print warnings on each boot, and make the warning text
   more consistent with the rest of the code

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8180/1: mm: implement no-highmem fast path in kmap_atomic_pfn()
  ARM: 8183/1: l2c: Improve l2c310_of_parse() error message
  ARM: 8181/1: Drop extra return statement
  ARM: 8182/1: l2c: Make l2x0_cache_size_of_parse() return 'int'
  ARM: enable bpf syscall
Linus Torvalds 2014-11-02 12:56:20 -08:00
commit 3c43de0ffd
5 changed files with 23 additions and 9 deletions

View File

@@ -412,6 +412,7 @@
 #define __NR_seccomp (__NR_SYSCALL_BASE+383)
 #define __NR_getrandom (__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create (__NR_SYSCALL_BASE+385)
+#define __NR_bpf (__NR_SYSCALL_BASE+386)
 
 /*
  * The following SWIs are ARM private.

View File

@@ -395,6 +395,7 @@
 		CALL(sys_seccomp)
 		CALL(sys_getrandom)
 /* 385 */	CALL(sys_memfd_create)
+		CALL(sys_bpf)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
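
For orientation, here is a minimal userspace sketch (not part of this pull, and assuming kernel headers new enough to provide union bpf_attr and BPF_MAP_TYPE_ARRAY) showing how the newly wired syscall can be exercised on ARM via raw syscall(2), since glibc has no wrapper; __NR_bpf resolves to 386 per the hunk above.

#define _GNU_SOURCE
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative only: create a tiny BPF array map through the raw syscall. */
int main(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(int);
	attr.value_size  = sizeof(long long);
	attr.max_entries = 4;

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0) {
		perror("bpf(BPF_MAP_CREATE)");
		return 1;
	}
	printf("created map, fd=%d\n", fd);
	return 0;
}

On a kernel without this change the call simply fails with ENOSYS, which also makes it a convenient runtime probe for the syscall being wired up.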

View File

@@ -956,7 +956,7 @@ static u32 cache_id_part_number_from_dt;
  * @associativity: variable to return the calculated associativity in
  * @max_way_size: the maximum size in bytes for the cache ways
  */
-static void __init l2x0_cache_size_of_parse(const struct device_node *np,
+static int __init l2x0_cache_size_of_parse(const struct device_node *np,
 					    u32 *aux_val, u32 *aux_mask,
 					    u32 *associativity,
 					    u32 max_way_size)
@@ -974,7 +974,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 	of_property_read_u32(np, "cache-line-size", &line_size);
 
 	if (!cache_size || !sets)
-		return;
+		return -ENODEV;
 
 	/* All these l2 caches have the same line = block size actually */
 	if (!line_size) {
@@ -1009,7 +1009,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 	if (way_size > max_way_size) {
 		pr_err("L2C OF: set size %dKB is too large\n", way_size);
-		return;
+		return -EINVAL;
 	}
 
 	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
@@ -1027,7 +1027,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 	if (way_size_bits < 1 || way_size_bits > 6) {
 		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
 		       way_size);
-		return;
+		return -EINVAL;
 	}
 
 	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
@@ -1036,6 +1036,8 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
 	*aux_val &= ~mask;
 	*aux_val |= val;
 	*aux_mask &= ~mask;
+
+	return 0;
 }
 
 static void __init l2x0_of_parse(const struct device_node *np,
@@ -1046,6 +1048,7 @@ static void __init l2x0_of_parse(const struct device_node *np,
 	u32 dirty = 0;
 	u32 val = 0, mask = 0;
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32(np, "arm,tag-latency", &tag);
 	if (tag) {
@@ -1068,7 +1071,10 @@ static void __init l2x0_of_parse(const struct device_node *np,
 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+	if (ret)
+		return;
+
 	if (assoc > 8) {
 		pr_err("l2x0 of: cache setting yield too high associativity\n");
 		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
@@ -1125,6 +1131,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
 	u32 tag[3] = { 0, 0, 0 };
 	u32 filter[2] = { 0, 0 };
 	u32 assoc;
+	int ret;
 
 	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
 	if (tag[0] && tag[1] && tag[2])
@@ -1152,7 +1159,10 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		       l2x0_base + L310_ADDR_FILTER_START);
 	}
 
-	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+	if (ret)
+		return;
+
 	switch (assoc) {
 	case 16:
 		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
@@ -1164,8 +1174,8 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
 		break;
 	default:
-		pr_err("PL310 OF: cache setting yield illegal associativity\n");
-		pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
+		pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+		       assoc);
 		break;
 	}
 }
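
Aside: the 1..6 range rejected above corresponds to the L2C-310 auxiliary-control way-size field, which (assuming the usual PL310 encoding) runs from 1 = 16KB ways to 6 = 512KB ways. A self-contained sketch of that mapping, using a hypothetical helper name rather than anything from this patch, could look like:

#include <linux/errno.h>
#include <linux/log2.h>

/* Hypothetical helper, not from this commit: encode a power-of-two way size
 * (in KB) into the PL310 AUX_CTRL way-size field, mirroring the 1..6 bounds
 * checked in l2x0_cache_size_of_parse() above. */
static int way_size_to_aux_field(unsigned int way_size_kb)
{
	int bits;

	if (!is_power_of_2(way_size_kb))
		return -EINVAL;

	bits = ilog2(way_size_kb) - 3;	/* 16KB -> 1, ..., 512KB -> 6 */
	if (bits < 1 || bits > 6)
		return -EINVAL;

	return bits;
}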

View File

@@ -1198,7 +1198,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 {
 	return dma_common_pages_remap(pages, size,
 			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
-	return NULL;
 }
 
 /*

View File

@@ -127,8 +127,11 @@ void *kmap_atomic_pfn(unsigned long pfn)
 {
 	unsigned long vaddr;
 	int idx, type;
+	struct page *page = pfn_to_page(pfn);
 
 	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
 
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
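
For context, a hedged caller sketch (the helper name is invented) of why this fast path matters: a caller that maps pages by PFN in a hot loop no longer pays for a per-CPU fixmap slot when the PFN is lowmem, since the path above returns the linear-map address from page_address() directly.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical example, not from this patch: copy one page identified only
 * by its PFN.  For lowmem PFNs the kmap_atomic_pfn() call now reduces to a
 * page_address() lookup, and kunmap_atomic() on such an address stays cheap. */
static void copy_page_from_pfn(unsigned long pfn, void *dst)
{
	void *src = kmap_atomic_pfn(pfn);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src);
}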