
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/mellanox/mlx4/cmd.c
	net/core/fib_rules.c
	net/ipv4/fib_frontend.c

The fib_rules.c and fib_frontend.c conflicts were locking adjustments
in 'net' overlapping addition and removal of code in 'net-next'.

The mlx4 conflict was a bug fix in 'net' happening in the same
place where a constant was being replaced with a more suitable macro.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2015-04-06 21:52:19 -04:00
commit c85d6975ef
91 changed files with 575 additions and 275 deletions

View File

@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
(DSA_MAX_SWITCHES).
Each of these switch child nodes should have the following required properties:
- reg : Describes the switch address on the MII bus
- reg : Contains two fields. The first one describes the
address on the MII bus. The second is the switch
number that must be unique in cascaded configurations
- #address-cells : Must be 1
- #size-cells : Must be 0

View File

@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
byte 4: 0 y6 y5 y4 y3 y2 y1 y0
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
the DualPoint Stick.
Dualpoint device -- interleaved packet format
---------------------------------------------
@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
byte 7: 0 y6 y5 y4 y3 y2 y1 y0
byte 8: 0 z6 z5 z4 z3 z2 z1 z0
Devices which use the interleaving format normally send standard PS/2 mouse
packets for the DualPoint Stick + ALPS Absolute Mode packets for the
touchpad, switching to the interleaved packet format when both the stick and
the touchpad are used at the same time.
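
As a quick illustration of the bit diagrams above, here is a minimal
sketch (not taken from the driver) that extracts the low y bits and the
pressure value from the two Version 2 bytes shown; the masks follow
directly from the layouts:

static void alps_decode_yz(const unsigned char *packet, int *y_lo, int *z)
{
	*y_lo = packet[4] & 0x7f;	/* byte 4: 0 y6 y5 y4 y3 y2 y1 y0 */
	*z    = packet[5] & 0x7f;	/* byte 5: 0 z6 z5 z4 z3 z2 z1 z0 */
}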
ALPS Absolute Mode - Protocol Version 3
---------------------------------------

View File

@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
The kernel does not provide button emulation for such devices but treats
them as any other INPUT_PROP_BUTTONPAD device.
INPUT_PROP_ACCELEROMETER
-------------------------
Directional axes on this device (absolute and/or relative x, y, z) represent
accelerometer data. All other axes retain their meaning. A device must not mix
regular directional axes and accelerometer axes on the same event node.
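
For driver authors, marking a device this way is a one-bit property.
A minimal sketch follows; the axis ranges are made up for illustration:

#include <linux/input.h>

static void example_accel_setup(struct input_dev *dev)
{
	__set_bit(INPUT_PROP_ACCELEROMETER, dev->propbit);
	/* these axes now carry acceleration, not pointer motion */
	input_set_abs_params(dev, ABS_X, -32768, 32767, 0, 0);
	input_set_abs_params(dev, ABS_Y, -32768, 32767, 0, 0);
	input_set_abs_params(dev, ABS_Z, -32768, 32767, 0, 0);
}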
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.

View File

@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
The type of approaching tool. A lot of kernel drivers cannot distinguish
between different tool types, such as a finger or a pen. In such cases, the
event should be omitted. The protocol currently supports MT_TOOL_FINGER and
MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
drivers should instead use input_mt_report_slot_state().
event should be omitted. The protocol currently supports MT_TOOL_FINGER,
MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
by input core; drivers should instead use input_mt_report_slot_state().
A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
device, because the firmware may not be able to determine which tool is being
used when it first appears.
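
For a type B driver, reporting the new palm tool goes through the slot
state helper rather than a raw ABS_MT_TOOL_TYPE event. A hedged sketch;
the slot number and coordinates are placeholders:

#include <linux/input/mt.h>

static void example_report_palm(struct input_dev *dev, int x, int y)
{
	input_mt_slot(dev, 0);
	input_mt_report_slot_state(dev, MT_TOOL_PALM, true);
	input_report_abs(dev, ABS_MT_POSITION_X, x);
	input_report_abs(dev, ABS_MT_POSITION_Y, y);
	input_sync(dev);
}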
ABS_MT_BLOB_ID

View File

@ -637,8 +637,7 @@ F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: amd64-microcode@amd64.org
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/amd*
@ -5095,7 +5094,7 @@ S: Supported
F: drivers/platform/x86/intel_menlow.c
INTEL IA32 MICROCODE UPDATE SUPPORT
M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/core*
F: arch/x86/kernel/cpu/microcode/intel*

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*

View File

@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
static inline int cpu_nr_cores(void)
{
return NR_CPUS >> threads_shift;
return nr_cpu_ids >> threads_shift;
}
static inline cpumask_t cpu_online_cores_map(void)

View File

@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
EVENT_CONSTRAINT_END
};
@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
c = intel_pebs_constraints(event);
c = intel_shared_regs_constraints(cpuc, event);
if (c)
return c;
c = intel_shared_regs_constraints(cpuc, event);
c = intel_pebs_constraints(event);
if (c)
return c;

View File

@ -799,7 +799,21 @@ retint_swapgs: /* return to user-space */
cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
jne opportunistic_sysret_failed
testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
/*
* SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
* restoring TF results in a trap from userspace immediately after
* SYSRET. This would cause an infinite loop whenever #DB happens
* with register state that satisfies the opportunistic SYSRET
* conditions. For example, single-stepping this user code:
*
* movq $stuck_here,%rcx
* pushfq
* popq %r11
* stuck_here:
*
* would never get past 'stuck_here'.
*/
testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
jnz opportunistic_sysret_failed
/* nothing to check for RSP */

View File

@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{ "bx", 8, offsetof(struct pt_regs, bx) },
{ "cx", 8, offsetof(struct pt_regs, cx) },
{ "dx", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, si) },
{ "di", 8, offsetof(struct pt_regs, di) },
{ "bp", 8, offsetof(struct pt_regs, bp) },
{ "sp", 8, offsetof(struct pt_regs, sp) },

View File

@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
/* ASRock */
{ /* Handle problems with rebooting on ASRock Q1900DC-ITX */
.callback = set_pci_reboot,
.ident = "ASRock Q1900DC-ITX",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
},
},
/* ASUS */
{ /* Handle problems with rebooting on ASUS P4S800 */
.callback = set_bios_reboot,

View File

@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
void __init xen_vmalloc_p2m_tree(void)
{
static struct vm_struct vm;
unsigned long p2m_limit;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
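
Rough sizing behind the new limit, assuming the x86-64 Kconfig default
of 512 GiB and 4 KiB pages (both assumptions, not taken from this hunk):

#include <stdio.h>

int main(void)
{
	/* 512 GiB of hotpluggable memory covers 2^27 PFNs */
	unsigned long pfns = (512UL << 30) / 4096;	/* 134217728 */
	/* one unsigned long p2m entry per PFN -> 1 GiB of table space */
	unsigned long vsize = pfns * sizeof(unsigned long);

	printf("%lu PFNs, %lu bytes of p2m\n", pfns, vsize);
	return 0;
}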

View File

@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->physical_block_size);
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm(t->io_opt, b->io_opt);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->raid_partial_stripes_expensive);
/* Find lowest common alignment_offset */
t->alignment_offset = lcm(t->alignment_offset, alignment)
t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
% max(t->physical_block_size, t->io_min);
/* Verify that new alignment_offset is on a logical block boundary */
@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
t->discard_alignment = lcm(t->discard_alignment, alignment) %
t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
t->discard_granularity;
}

View File

@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
int i = 0;
/*
* Stop when we see all the items the table claimed to have
* OR we run off the end of the table (also happens)
* Stop when we have seen all the items the table claimed to have
* (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
* off the end of the table (should never happen but sometimes does
* on bogus implementations.)
*/
while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
while ((!num || i < num) &&
(data - buf + sizeof(struct dmi_header)) <= len) {
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be16(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
/*
* The 64-bit SMBIOS 3.0 entry point no longer has a field
* containing the number of structures present in the table.
* Instead, it defines the table size as a maximum size, and
* relies on the end-of-table structure type (#127) to be used
* to signal the end of the table.
* So let's define dmi_num as an upper bound as well: each
* structure has a 4 byte header, so dmi_len / 4 is an upper
* bound for the number of structures in the table.
*/
dmi_num = dmi_len / 4;
if (dmi_walk_early(dmi_decode) == 0) {
pr_info("SMBIOS %d.%d present.\n",
dmi_ver >> 8, dmi_ver & 0xFF);

View File

@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
kfree(edid);
return ret;

View File

@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
count = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
} else
count = (*connector_funcs->get_modes)(connector);
}

View File

@ -147,6 +147,7 @@ struct fimd_win_data {
unsigned int ovl_height;
unsigned int fb_width;
unsigned int fb_height;
unsigned int fb_pitch;
unsigned int bpp;
unsigned int pixel_format;
dma_addr_t dma_addr;
@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
win_data->offset_y = plane->crtc_y;
win_data->ovl_width = plane->crtc_width;
win_data->ovl_height = plane->crtc_height;
win_data->fb_pitch = plane->pitch;
win_data->fb_width = plane->fb_width;
win_data->fb_height = plane->fb_height;
win_data->dma_addr = plane->dma_addr[0] + offset;
win_data->bpp = plane->bpp;
win_data->pixel_format = plane->pixel_format;
win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
(plane->bpp >> 3);
win_data->buf_offsize =
plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));

View File

@ -55,6 +55,7 @@ struct hdmi_win_data {
unsigned int fb_x;
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_pitch;
unsigned int fb_height;
unsigned int src_width;
unsigned int src_height;
@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
} else {
luma_addr[0] = win_data->dma_addr;
chroma_addr[0] = win_data->dma_addr
+ (win_data->fb_width * win_data->fb_height);
+ (win_data->fb_pitch * win_data->fb_height);
}
if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
luma_addr[1] = luma_addr[0] + win_data->fb_width;
chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
}
} else {
ctx->interlace = false;
@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
VP_IMG_VSIZE(win_data->fb_height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
VP_IMG_VSIZE(win_data->fb_height / 2));
vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* converting dma address base and source offset */
dma_addr = win_data->dma_addr
+ (win_data->fb_x * win_data->bpp >> 3)
+ (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+ (win_data->fb_y * win_data->fb_pitch);
src_x_offset = 0;
src_y_offset = 0;
@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
win_data->fb_pitch / (win_data->bpp >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
win_data->fb_y = plane->fb_y;
win_data->fb_width = plane->fb_width;
win_data->fb_height = plane->fb_height;
win_data->fb_pitch = plane->pitch;
win_data->src_width = plane->src_width;
win_data->src_height = plane->src_height;

View File

@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
if (i915_needs_cmd_parser(ring)) {
if (i915_needs_cmd_parser(ring) && args->batch_len) {
batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,

View File

@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, set->plane_id);
if (!plane) {
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
ret = -ENOENT;
goto out_unlock;
}
@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, get->plane_id);
if (!plane) {
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
ret = -ENOENT;
goto out_unlock;
}

View File

@ -2129,6 +2129,7 @@
#define VCE_UENC_REG_CLOCK_GATING 0x207c0
#define VCE_SYS_INT_EN 0x21300
# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3)
#define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c
#define VCE_LMI_CTRL2 0x21474
#define VCE_LMI_CTRL 0x21498
#define VCE_LMI_VM_CTRL 0x214a0

View File

@ -1565,6 +1565,7 @@ struct radeon_dpm {
int new_active_crtc_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;
struct radeon_dpm_dynamic_state dyn_state;
struct radeon_dpm_fan fan;
u32 tdp_limit;

View File

@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
static bool radeon_read_bios(struct radeon_device *rdev)
{
uint8_t __iomem *bios;
uint8_t __iomem *bios, val1, val2;
size_t size;
rdev->bios = NULL;
@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return false;
}
if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
val1 = readb(&bios[0]);
val2 = readb(&bios[1]);
if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
rdev->bios = kmemdup(bios, size, GFP_KERNEL);
rdev->bios = kzalloc(size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
memcpy_fromio(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}

View File

@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct radeon_bo *bo;
struct fence *fence;
int r;
bo = container_of(it, struct radeon_bo, mn_it);
@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
continue;
}
fence = reservation_object_get_excl(bo->tbo.resv);
if (fence) {
r = radeon_fence_wait((struct radeon_fence *)fence, false);
if (r)
DRM_ERROR("(%d) failed to wait for user bo\n", r);
}
r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
false, MAX_SCHEDULE_TIMEOUT);
if (r)
DRM_ERROR("(%d) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);

View File

@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
radeon_pm_compute_clocks(rdev);
}
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
enum radeon_pm_state_type dpm_state)
static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
int i;
struct radeon_ps *ps;
u32 ui_class;
bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
true : false;
@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
single_display = false;
return single_display;
}
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
enum radeon_pm_state_type dpm_state)
{
int i;
struct radeon_ps *ps;
u32 ui_class;
bool single_display = radeon_dpm_single_display(rdev);
/* certain older asics have a separate 3D performance state,
* so try that first if the user selected performance
*/
@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
struct radeon_ps *ps;
enum radeon_pm_state_type dpm_state;
int ret;
bool single_display = radeon_dpm_single_display(rdev);
/* if dpm init failed */
if (!rdev->pm.dpm_enabled)
@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
/* vce just modifies an existing state so force a change */
if (ps->vce_active != rdev->pm.dpm.vce_active)
goto force;
/* user has made a display change (such as timing) */
if (rdev->pm.dpm.single_display != single_display)
goto force;
if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
/* for pre-BTC and APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
@ -1069,6 +1080,7 @@ force:
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
rdev->pm.dpm.single_display = single_display;
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {

View File

@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
if (!ring->ready)
if (!ring->ring)
return 0;
/* print 8 dw before current rptr as often it's the last executed

View File

@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
if (!ttm->sg->sgl)
return;
/* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

View File

@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
WREG32(VCE_LMI_SWAP_CNTL1, 0);
WREG32(VCE_LMI_VM_CTRL, 0);
WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
addr &= 0xff;
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
WREG32(VCE_VCPU_CACHE_SIZE0, size);

View File

@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = bma180_get_data_reg(data, bit);
if (ret < 0) {

View File

@ -168,14 +168,14 @@ static const struct {
int val;
int val2;
u8 bw_bits;
} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
{15, 630000, 0x09},
{31, 250000, 0x0A},
{62, 500000, 0x0B},
{125, 0, 0x0C},
{250, 0, 0x0D},
{500, 0, 0x0E},
{1000, 0, 0x0F} };
} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
{31, 260000, 0x09},
{62, 500000, 0x0A},
{125, 0, 0x0B},
{250, 0, 0x0C},
{500, 0, 0x0D},
{1000, 0, 0x0E},
{2000, 0, 0x0F} };
static const struct {
int bw_bits;
@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
"15.620000 31.260000 62.50000 125 250 500 1000 2000");
static struct attribute *bmc150_accel_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
int bit, ret, i = 0;
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = i2c_smbus_read_word_data(data->client,
BMC150_ACCEL_AXIS_TO_REG(bit));

View File

@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = kxcjk1013_get_acc_reg(data, bit);
if (ret < 0) {

View File

@ -137,7 +137,8 @@ config AXP288_ADC
config CC10001_ADC
tristate "Cosmic Circuits 10001 ADC driver"
depends on HAS_IOMEM || HAVE_CLK || REGULATOR
depends on HAVE_CLK || REGULATOR
depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help

View File

@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
struct iio_dev *idev = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(idev);
struct iio_buffer *buffer = idev->buffer;
struct at91_adc_reg_desc *reg = st->registers;
u32 status = at91_adc_readl(st, reg->trigger_register);
int value;
@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_writel(st, reg->trigger_register,
status | value);
for_each_set_bit(bit, buffer->scan_mask,
for_each_set_bit(bit, idev->active_scan_mask,
st->num_channels) {
struct iio_chan_spec const *chan = idev->channels + bit;
at91_adc_writel(st, AT91_ADC_CHER,
@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_writel(st, reg->trigger_register,
status & ~value);
for_each_set_bit(bit, buffer->scan_mask,
for_each_set_bit(bit, idev->active_scan_mask,
st->num_channels) {
struct iio_chan_spec const *chan = idev->channels + bit;
at91_adc_writel(st, AT91_ADC_CHDR,

View File

@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
struct iio_buffer *buffer = indio_dev->buffer;
unsigned int enb = 0;
u8 bit;
tiadc_step_config(indio_dev);
for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
enb |= (get_adc_step_bit(adc_dev, bit) << 1);
adc_dev->buffer_en_ch_steps = enb;

View File

@ -141,9 +141,13 @@ struct vf610_adc {
struct regulator *vref;
struct vf610_adc_feature adc_feature;
u32 sample_freq_avail[5];
struct completion completion;
};
static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
#define VF610_ADC_CHAN(_idx, _chan_type) { \
.type = (_chan_type), \
.indexed = 1, \
@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
/* sentinel */
};
/*
* ADC sample frequency, unit is ADCK cycles.
* ADC clk source is ipg clock, which is the same as bus clock.
*
* ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
* SFCAdder: fixed to 6 ADCK cycles
* AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
* BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
* LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
*
* By default, 12-bit resolution mode is enabled and the clock source
* is set to the ipg clock, which gives the frequency group below:
*/
static const u32 vf610_sample_freq_avail[5] =
{1941176, 559332, 286957, 145374, 73171};
static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
{
unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
int i;
/*
* Calculate ADC sample frequencies
* Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
* which is the same as bus clock.
*
* ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
* SFCAdder: fixed to 6 ADCK cycles
* AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
* BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
* LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
*/
adck_rate = ipg_rate / info->adc_feature.clk_div;
for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
info->sample_freq_avail[i] =
adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
}
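
A worked example of the calculation above, assuming a 66 MHz ipg clock
(a typical Vybrid value, but an assumption here) and the new clk_div of 8:

#include <stdio.h>

int main(void)
{
	unsigned long adck = 66000000UL / 8;	/* 8.25 MHz ADCK */

	/* AverageNum = 1 (fastest) and 32 (slowest), 12-bit mode */
	printf("%lu Hz\n", adck / (6 + 1 * (25 + 3)));	/* 242647 */
	printf("%lu Hz\n", adck / (6 + 32 * (25 + 3)));	/* 9146 */
	return 0;
}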
static inline void vf610_adc_cfg_init(struct vf610_adc *info)
{
struct vf610_adc_feature *adc_feature = &info->adc_feature;
/* set default Configuration for ADC controller */
info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
info->adc_feature.calibration = true;
info->adc_feature.ovwren = true;
adc_feature->calibration = true;
adc_feature->ovwren = true;
info->adc_feature.clk_div = 1;
info->adc_feature.res_mode = 12;
info->adc_feature.sample_rate = 1;
info->adc_feature.lpm = true;
adc_feature->res_mode = 12;
adc_feature->sample_rate = 1;
adc_feature->lpm = true;
/* Use a safe ADCK rate, below 20 MHz on all devices */
adc_feature->clk_div = 8;
vf610_adc_calculate_rates(info);
}
static void vf610_adc_cfg_post_set(struct vf610_adc *info)
@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
/* low power configuration */
cfg_data &= ~VF610_ADC_ADLPC_EN;
if (adc_feature->lpm)
cfg_data |= VF610_ADC_ADLPC_EN;
/* disable high speed */
cfg_data &= ~VF610_ADC_ADHSC_EN;
writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
static ssize_t vf610_show_samp_freq_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
size_t len = 0;
int i;
for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
len += scnprintf(buf + len, PAGE_SIZE - len,
"%u ", info->sample_freq_avail[i]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
return len;
}
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
static struct attribute *vf610_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
*val = info->sample_freq_avail[info->adc_feature.sample_rate];
*val2 = 0;
return IIO_VAL_INT;
@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
for (i = 0;
i < ARRAY_SIZE(vf610_sample_freq_avail);
i < ARRAY_SIZE(info->sample_freq_avail);
i++)
if (val == vf610_sample_freq_avail[i]) {
if (val == info->sample_freq_avail[i]) {
info->adc_feature.sample_rate = i;
vf610_adc_sample_set(info);
return 0;

View File

@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
int bit, ret, i = 0;
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = i2c_smbus_read_word_data(data->client,
BMG160_AXIS_TO_REG(bit));

View File

@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);
indio_dev->trig = adis->trig;
indio_dev->trig = iio_trigger_get(adis->trig);
if (ret)
goto error_free_irq;

View File

@ -410,42 +410,46 @@ error_read_raw:
}
}
static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
{
int result;
int result, i;
u8 d;
if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
return -EINVAL;
if (fsr == st->chip_config.fsr)
return 0;
for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
if (gyro_scale_6050[i] == val) {
d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st,
st->reg->gyro_config, d);
if (result)
return result;
d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
if (result)
return result;
st->chip_config.fsr = fsr;
st->chip_config.fsr = i;
return 0;
}
}
return 0;
return -EINVAL;
}
static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
{
int result;
int result, i;
u8 d;
if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
return -EINVAL;
if (fs == st->chip_config.accl_fs)
return 0;
for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
if (accel_scale[i] == val) {
d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st,
st->reg->accl_config, d);
if (result)
return result;
d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
if (result)
return result;
st->chip_config.accl_fs = fs;
st->chip_config.accl_fs = i;
return 0;
}
}
return 0;
return -EINVAL;
}
static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
result = inv_mpu6050_write_fsr(st, val);
result = inv_mpu6050_write_gyro_scale(st, val2);
break;
case IIO_ACCEL:
result = inv_mpu6050_write_accel_fs(st, val);
result = inv_mpu6050_write_accel_scale(st, val2);
break;
default:
result = -EINVAL;

View File

@ -24,6 +24,16 @@
#include <linux/poll.h>
#include "inv_mpu_iio.h"
static void inv_clear_kfifo(struct inv_mpu6050_state *st)
{
unsigned long flags;
/* take the spinlock to keep the interrupt handler from kicking in */
spin_lock_irqsave(&st->time_stamp_lock, flags);
kfifo_reset(&st->timestamps);
spin_unlock_irqrestore(&st->time_stamp_lock, flags);
}
int inv_reset_fifo(struct iio_dev *indio_dev)
{
int result;
@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
INV_MPU6050_BIT_FIFO_RST);
if (result)
goto reset_fifo_fail;
/* clear timestamps fifo */
inv_clear_kfifo(st);
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
@ -83,16 +97,6 @@ reset_fifo_fail:
return result;
}
static void inv_clear_kfifo(struct inv_mpu6050_state *st)
{
unsigned long flags;
/* take the spinlock to keep the interrupt handler from kicking in */
spin_lock_irqsave(&st->time_stamp_lock, flags);
kfifo_reset(&st->timestamps);
spin_unlock_irqrestore(&st->time_stamp_lock, flags);
}
/**
* inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
*/
@ -184,7 +188,6 @@ end_session:
flush_fifo:
/* Flush HW and SW FIFOs. */
inv_reset_fifo(indio_dev);
inv_clear_kfifo(st);
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);

View File

@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
base = KMX61_MAG_XOUT_L;
mutex_lock(&data->lock);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = kmx61_read_measurement(data, base, bit);
if (ret < 0) {

View File

@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
* @attr_list: List of IIO device attributes
*
* This function frees the memory allocated for each of the IIO device
* attributes in the list. Note: if you want to reuse the list after calling
* this function you have to reinitialize it using INIT_LIST_HEAD().
* attributes in the list.
*/
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
list_for_each_entry_safe(p, n, attr_list, l) {
kfree(p->dev_attr.attr.name);
list_del(&p->l);
kfree(p);
}
}
@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
kfree(indio_dev->chan_attr_group.attrs);
indio_dev->chan_attr_group.attrs = NULL;
}
static void iio_dev_release(struct device *device)

View File

@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
error_free_setup_event_lines:
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface);
indio_dev->event_interface = NULL;
return ret;
}

View File

@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
&val);

View File

@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
*/
if ((PAGE_ALIGN(addr + size) <= size) ||
(PAGE_ALIGN(addr + size) <= addr))
return ERR_PTR(-EINVAL);
if (!can_do_mlock())
return ERR_PTR(-EPERM);
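
To see what the new check rejects, here is a small 64-bit userspace
sketch with a simplified page-align helper (ex_page_align is
hypothetical, standing in for the kernel's PAGE_ALIGN):

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096ULL
#define ex_page_align(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

int main(void)
{
	uint64_t addr = 0xfffffffffffff000ULL;	/* last page of the VA space */
	uint64_t size = 0x2000;			/* addr + size wraps to 0x1000 */

	if (ex_page_align(addr + size) <= size ||
	    ex_page_align(addr + size) <= addr)
		puts("rejected: addr + size overflowed");
	return 0;
}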

View File

@ -1154,10 +1154,28 @@ out:
mutex_unlock(&alps_mutex);
}
static void alps_report_bare_ps2_packet(struct input_dev *dev,
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
{
struct alps_data *priv = psmouse->private;
struct input_dev *dev;
/* Figure out which device to use to report the bare packet */
if (priv->proto_version == ALPS_PROTO_V2 &&
(priv->flags & ALPS_DUALPOINT)) {
/* On V2 devices the DualPoint Stick reports bare packets */
dev = priv->dev2;
} else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
/* Register the dev3 mouse the first time we receive a bare PS/2 packet */
if (!IS_ERR(priv->dev3))
psmouse_queue_work(psmouse, &priv->dev3_register_work,
0);
return;
} else {
dev = priv->dev3;
}
if (report_buttons)
alps_report_buttons(dev, NULL,
packet[0] & 1, packet[0] & 2, packet[0] & 4);
@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
* de-synchronization.
*/
alps_report_bare_ps2_packet(priv->dev2,
&psmouse->packet[3], false);
alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
false);
/*
* Continue with the standard ALPS protocol handling,
@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
* properly we only do this if the device is fully synchronized.
*/
if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
/* Register the dev3 mouse the first time we receive a bare PS/2 packet */
if (unlikely(!priv->dev3))
psmouse_queue_work(psmouse,
&priv->dev3_register_work, 0);
if (psmouse->pktcnt == 3) {
/* Once dev3 mouse device is registered report data */
if (likely(!IS_ERR_OR_NULL(priv->dev3)))
alps_report_bare_ps2_packet(priv->dev3,
psmouse->packet,
true);
alps_report_bare_ps2_packet(psmouse, psmouse->packet,
true);
return PSMOUSE_FULL_PACKET;
}
return PSMOUSE_GOOD_DATA;
@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
priv->x_max = 1360;
priv->y_max = 660;
priv->x_bits = 23;
priv->y_bits = 12;
if (alps_dolphin_get_device_area(psmouse, priv))
return -EIO;
break;
case ALPS_PROTO_V6:
@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
if (alps_dolphin_get_device_area(psmouse, priv))
return -EIO;
priv->x_max = 0xfff;
priv->y_max = 0x7ff;
if (priv->fw_ver[1] != 0xba)
priv->flags |= ALPS_BUTTONPAD;

View File

@ -152,6 +152,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
{ANY_BOARD_ID, ANY_BOARD_ID},
1024, 5022, 2508, 4832
},
{
(const char * const []){"LEN2006", NULL},
{2691, 2691},
1024, 5045, 2457, 4832
},
{
(const char * const []){"LEN2006", NULL},
{ANY_BOARD_ID, ANY_BOARD_ID},
@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN2003",
"LEN2004", /* L440 */
"LEN2005",
"LEN2006",
"LEN2006", /* Edge E440/E540 */
"LEN2007",
"LEN2008",
"LEN2009",

View File

@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
cmd->raw_cmd[0] &= ~(0xffffUL << 32);
cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
cmd->raw_cmd[0] |= ((u64)devid) << 32;
}
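
The point of the mask change: the old expression ~(0xffffUL << 32) only
cleared 16 of the 32 device-ID bits, so stale bits [63:48] could survive
the update, while BIT_ULL(32) - 1 is a well-defined keep-the-low-32-bits
mask. A standalone sketch (EX_BIT_ULL mirrors the kernel's BIT_ULL):

#include <stdint.h>
#include <stdio.h>

#define EX_BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint64_t cmd = ~0ULL;

	cmd &= EX_BIT_ULL(32) - 1;	/* keep bits [31:0] only */
	printf("%#llx\n", (unsigned long long)cmd);	/* 0xffffffff */
	return 0;
}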
@ -802,6 +802,7 @@ static int its_alloc_tables(struct its_node *its)
int i;
int psz = SZ_64K;
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_WaWb;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@ -848,7 +849,7 @@ retry_baser:
val = (virt_to_phys(base) |
(type << GITS_BASER_TYPE_SHIFT) |
((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
GITS_BASER_WaWb |
cache |
shr |
GITS_BASER_VALID);
@ -874,9 +875,12 @@ retry_baser:
* Shareability didn't stick. Just use
* whatever the read reported, which is likely
* to be the only thing this redistributor
* supports.
* supports. If that's zero, make it
* non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr)
cache = GITS_BASER_nC;
goto retry_baser;
}
@ -980,16 +984,39 @@ static void its_cpu_init_lpis(void)
tmp = readq_relaxed(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must
* remove the cacheability attributes as
* well.
*/
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
writeq_relaxed(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
}
/* set PENDBASE */
val = (page_to_phys(pend_page) |
GICR_PROPBASER_InnerShareable |
GICR_PROPBASER_WaWb);
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_WaWb);
writeq_relaxed(val, rbase + GICR_PENDBASER);
tmp = readq_relaxed(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must remove the
* cacheability attributes as well.
*/
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
writeq_relaxed(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
@ -1026,7 +1053,7 @@ static void its_cpu_init_collection(void)
* This ITS wants a linear CPU number.
*/
target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
/* Perform collection mapping */
@ -1422,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
writeq_relaxed(baser, its->base + GITS_CBASER);
tmp = readq_relaxed(its->base + GITS_CBASER);
writeq_relaxed(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must
* remove the cacheability attributes as
* well.
*/
baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
GITS_CBASER_CACHEABILITY_MASK);
baser |= GITS_CBASER_nC;
writeq_relaxed(baser, its->base + GITS_CBASER);
}
pr_info("ITS: using cache flushing for cmd queue\n");
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
writeq_relaxed(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
if (!its->domain) {

View File

@ -2729,16 +2729,11 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvneta_port *pp = netdev_priv(dev);
int ret;
if (!pp->phy_dev)
return -ENOTSUPP;
ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
if (!ret)
mvneta_adjust_link(dev);
return ret;
return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}
/* Ethtool methods */

View File

@ -725,7 +725,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
* on the host, we deprecate the error message for this
* specific command/input_mod/opcode_mod/fw-status to be debug.
*/
if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
if (op == MLX4_CMD_SET_PORT &&
(in_modifier == 1 || in_modifier == 2) &&
op_modifier == MLX4_SET_PORT_IB_OPCODE &&
context->fw_status == CMD_STAT_BAD_SIZE)
mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",

View File

@ -1008,8 +1008,7 @@ err:
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
int max = xennet_can_sg(dev) ?
XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
if (mtu > max)
return -EINVAL;
@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netdev->ethtool_ops = &xennet_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
np->netdev = netdev;
netif_carrier_off(netdev);

View File

@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
return NULL;
}
static int of_empty_ranges_quirk(void)
static int of_empty_ranges_quirk(struct device_node *np)
{
if (IS_ENABLED(CONFIG_PPC)) {
/* To save cycles, we cache the result */
/* To save cycles, we cache the result for global "Mac" setting */
static int quirk_state = -1;
/* PA-SEMI sdc DT bug */
if (of_device_is_compatible(np, "1682m-sdc"))
return true;
/* Make quirk cached */
if (quirk_state < 0)
quirk_state =
of_machine_is_compatible("Power Macintosh") ||
@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* This code is only enabled on powerpc. --gcl
*/
ranges = of_get_property(parent, rprop, &rlen);
if (ranges == NULL && !of_empty_ranges_quirk()) {
if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
pr_debug("OF: no ranges; cannot translate\n");
return 1;
}

View File

@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
config IIO_SIMPLE_DUMMY_BUFFER
bool "Buffered capture support"
select IIO_BUFFER
select IIO_TRIGGER
select IIO_KFIFO_BUF
help
Add buffered data capture to the simple dummy driver.

View File

@ -592,6 +592,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
mutex_init(&data->lock);
indio_dev->dev.parent = dev;
indio_dev->name = dev->driver->name;
indio_dev->info = &hmc5843_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = data->variant->channels;

View File

@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
sport->port.membase + UARTPFIFO);
/* explicitly clear RDRF */
readb(sport->port.membase + UARTSR1);
/* flush Tx and Rx FIFO */
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
sport->port.membase + UARTCFIFO);
@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port)
sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);
sport->port.fifosize = sport->txfifo_size;
sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);

View File

@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
free_irq(ourport->tx_irq, ourport);
tx_enabled(port) = 0;
ourport->tx_claimed = 0;
ourport->tx_mode = 0;
}
if (ourport->rx_claimed) {

View File

@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
status = PORT_PLC;
port_change_bit = "link state";
break;
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
status = PORT_CEC;
port_change_bit = "config error";
break;
default:
/* Should never happen */
return;
@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_C_LINK_STATE << 16;
if ((raw_port_status & PORT_WRC))
status |= USB_PORT_STAT_C_BH_RESET << 16;
if ((raw_port_status & PORT_CEC))
status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
}
if (hcd->speed != HCD_USB3) {
@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_PORT_LINK_STATE:
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
port_array[wIndex], temp);
break;
@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
*/
status = bus_state->resuming_ports;
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */

View File

@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
xhci->quirks |= XHCI_AVOID_BEI;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
* PPT chipsets.
*/
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_AVOID_BEI;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {

View File

@ -1203,7 +1203,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
if (udc->driver) {
dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
spin_unlock(&udc->lock);
spin_unlock_irqrestore(&udc->lock, flags);
return -EBUSY;
}

View File

@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
/*
* ELV devices:
*/
@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
(udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
return ftdi_jtag_probe(serial);
if (udev->product &&
(!strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
return 0;

View File

@ -561,6 +561,12 @@
*/
#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
/*
* Synapse Wireless product ids (FTDI_VID)
* http://www.synapse-wireless.com
*/
#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
/********************************/
/** third-party VID/PID combos **/

View File

@ -61,6 +61,7 @@ struct keyspan_pda_private {
/* For Xircom PGSDB9 and older Entrega version of the same device */
#define XIRCOM_VENDOR_ID 0x085a
#define XIRCOM_FAKE_ID 0x8027
#define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */
#define ENTREGA_VENDOR_ID 0x1645
#define ENTREGA_FAKE_ID 0x8093
@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
#endif
#ifdef XIRCOM
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
#endif
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
#ifdef XIRCOM
static const struct usb_device_id id_table_fake_xircom[] = {
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
{ }
};

View File

@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG
In that case step 3 should be omitted.
config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
int "Hotplugged memory limit (in GiB) for a PV guest"
default 512 if X86_64
default 4 if X86_32
range 0 64 if X86_32
depends on XEN_HAVE_PVMMU
depends on XEN_BALLOON_MEMORY_HOTPLUG
help
Maximum amount of memory (in GiB) that a PV guest can be
expanded to when using memory hotplug.
A PV guest can have more memory than this limit if it is
started with a larger maximum.
This value is used to allocate enough space in internal
tables needed for physical memory administration.
config XEN_SCRUB_PAGES
bool "Scrub pages before returning them to system"
depends on XEN_BALLOON

View File

@ -229,6 +229,29 @@ static enum bp_state reserve_additional_memory(long credit)
balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* add_memory() will build page tables for the new memory so
* the p2m must contain invalid entries so the correct
* non-present PTEs will be written.
*
* If a failure occurs, the original (identity) p2m entries
* are not restored since this region is now known not to
* conflict with any devices.
*/
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long pfn, i;
pfn = PFN_DOWN(hotplug_start_paddr);
for (i = 0; i < balloon_hotplug; i++) {
if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
pr_warn("set_phys_to_machine() failed, no memory added\n");
return BP_ECANCELED;
}
}
}
#endif
rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
if (rc) {

View File

@ -1,6 +1,9 @@
/*
* fs/cifs/cifsencrypt.c
*
* Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
* for more detailed information
*
* Copyright (C) International Business Machines Corp., 2005,2013
* Author(s): Steve French (sfrench@us.ibm.com)
*
@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
__func__);
return rc;
}
} else if (ses->serverName) {
} else {
/* We use ses->serverName if no domain name available */
len = strlen(ses->serverName);
server = kmalloc(2 + (len * 2), GFP_KERNEL);

View File

@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
pr_warn("CIFS: username too long\n");
goto cifs_parse_mount_err;
}
kfree(vol->username);
vol->username = kstrdup(string, GFP_KERNEL);
if (!vol->username)
goto cifs_parse_mount_err;
@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
goto cifs_parse_mount_err;
}
kfree(vol->domainname);
vol->domainname = kstrdup(string, GFP_KERNEL);
if (!vol->domainname) {
pr_warn("CIFS: no memory for domainname\n");
@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
if (strncasecmp(string, "default", 7) != 0) {
kfree(vol->iocharset);
vol->iocharset = kstrdup(string,
GFP_KERNEL);
if (!vol->iocharset) {
@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
* calling name ends in null (byte 16) from old smb
* convention.
*/
if (server->workstation_RFC1001_name &&
server->workstation_RFC1001_name[0] != 0)
if (server->workstation_RFC1001_name[0] != 0)
rfc1002mangle(ses_init_buf->trailer.
session_req.calling_name,
server->workstation_RFC1001_name,
@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
#endif /* CIFS_WEAK_PW_HASH */
rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
bcc_ptr, nls_codepage);
if (rc) {
cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n",
__func__, rc);
cifs_buf_release(smb_buffer);
return rc;
}
bcc_ptr += CIFS_AUTH_RESP_SIZE;
if (ses->capabilities & CAP_UNICODE) {

View File

@ -1823,6 +1823,7 @@ refind_writable:
cifsFileInfo_put(inv_file);
spin_lock(&cifs_file_list_lock);
++refind;
inv_file = NULL;
goto refind_writable;
}
}

View File

@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
cifs_buf_release(srchinf->ntwrk_buf_start);
}
kfree(srchinf);
if (rc)
goto cgii_exit;
} else
goto cgii_exit;

View File

@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
/* return pointer to beginning of data area, ie offset from SMB start */
if ((*off != 0) && (*len != 0))
return hdr->ProtocolId + *off;
return (char *)(&hdr->ProtocolId[0]) + *off;
else
return NULL;
}

View File

@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid,
/* No need to change MaxChunks since already set to 1 */
chunk_sizes_updated = true;
}
} else
goto cchunk_out;
}
cchunk_out:

View File

@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
struct smb2_ioctl_req *req;
struct smb2_ioctl_rsp *rsp;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
struct cifs_ses *ses;
struct kvec iov[2];
int resp_buftype;
int num_iovecs;
@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (plen)
*plen = 0;
if (tcon)
ses = tcon->ses;
else
return -EIO;
if (ses && (ses->server))
server = ses->server;
else
@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
if ((rc != 0) && (rc != -EINVAL)) {
if (tcon)
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
} else if (rc == -EINVAL) {
if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
(opcode != FSCTL_SRV_COPYCHUNK)) {
if (tcon)
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
}
}
@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
if ((rc != 0) && tcon)
if (rc != 0)
cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
free_rsp_buf(resp_buftype, iov[0].iov_base);
@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov[2];
int rc = 0;
int len;
int resp_buftype;
int resp_buftype = CIFS_NO_BUFFER;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;

View File

@ -126,8 +126,23 @@
#define GICR_PROPBASER_WaWb (5U << 7)
#define GICR_PROPBASER_RaWaWt (6U << 7)
#define GICR_PROPBASER_RaWaWb (7U << 7)
#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
#define GICR_PENDBASER_NonShareable (0U << 10)
#define GICR_PENDBASER_InnerShareable (1U << 10)
#define GICR_PENDBASER_OuterShareable (2U << 10)
#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
#define GICR_PENDBASER_nCnB (0U << 7)
#define GICR_PENDBASER_nC (1U << 7)
#define GICR_PENDBASER_RaWt (2U << 7)
#define GICR_PENDBASER_RaWb (3U << 7)
#define GICR_PENDBASER_WaWt (4U << 7)
#define GICR_PENDBASER_WaWb (5U << 7)
#define GICR_PENDBASER_RaWaWt (6U << 7)
#define GICR_PENDBASER_RaWaWb (7U << 7)
#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
/*
* Re-Distributor registers, offsets from SGI_base
*/
@ -182,6 +197,7 @@
#define GITS_CBASER_WaWb (5UL << 59)
#define GITS_CBASER_RaWaWt (6UL << 59)
#define GITS_CBASER_RaWaWb (7UL << 59)
#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
#define GITS_CBASER_NonShareable (0UL << 10)
#define GITS_CBASER_InnerShareable (1UL << 10)
#define GITS_CBASER_OuterShareable (2UL << 10)
@ -198,6 +214,7 @@
#define GITS_BASER_WaWb (5UL << 59)
#define GITS_BASER_RaWaWt (6UL << 59)
#define GITS_BASER_RaWaWb (7UL << 59)
#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
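
The new *_CACHEABILITY_MASK and *_SHAREABILITY_MASK constants exist so callers can read-modify-write one register field without disturbing its neighbours. A minimal sketch of that idiom using the PENDBASER values above, with a plain variable standing in for the actual register access:

#include <stdio.h>
#include <stdint.h>

#define GICR_PENDBASER_RaWaWb			(7ULL << 7)
#define GICR_PENDBASER_CACHEABILITY_MASK	(7ULL << 7)

int main(void)
{
	uint64_t pendbaser = 0x123456789abcdef0ULL;	/* pretend register read */

	/* Clear the 3-bit cacheability field, then set the desired value. */
	pendbaser &= ~GICR_PENDBASER_CACHEABILITY_MASK;
	pendbaser |= GICR_PENDBASER_RaWaWb;

	printf("%#llx\n", (unsigned long long)pendbaser);
	return 0;
}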

View File

@ -4,5 +4,6 @@
#include <linux/compiler.h>
unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
#endif /* _LCM_H */

View File

@ -2182,6 +2182,12 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
DECLARE_PER_CPU(int, xmit_recursion);
static inline int dev_recursion_level(void)
{
return this_cpu_read(xmit_recursion);
}
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
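
dev_recursion_level() simply exposes the per-CPU xmit_recursion counter (made non-static and exported by a later hunk in this series) so code outside the core -- here the IPv6 fragmentation and multicast-loopback paths -- can tell that it is running underneath a nested transmit and take a safe path. A userspace sketch of the same idea, with a thread-local counter playing the role of the kernel's per-CPU variable:

#include <stdio.h>

static _Thread_local int xmit_recursion;	/* per-CPU in the kernel */

static int dev_recursion_level(void)
{
	return xmit_recursion;
}

static void xmit(int depth)
{
	xmit_recursion++;
	if (dev_recursion_level() > 1)
		printf("nested transmit (level %d): take the safe path\n",
		       dev_recursion_level());
	if (depth)
		xmit(depth - 1);	/* e.g. a tunnel transmitting through
					 * another device */
	xmit_recursion--;
}

int main(void)
{
	xmit(2);
	return 0;
}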

View File

@ -455,22 +455,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
static inline int sk_mc_loop(struct sock *sk)
{
if (!sk)
return 1;
switch (sk->sk_family) {
case AF_INET:
return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
#endif
}
WARN_ON(1);
return 1;
}
bool ip_call_ra_chain(struct sk_buff *skb);
/*

View File

@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));

View File

@ -1762,6 +1762,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
bool sk_mc_loop(struct sock *sk);
static inline bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);

View File

@ -973,7 +973,8 @@ struct input_keymap_entry {
*/
#define MT_TOOL_FINGER 0
#define MT_TOOL_PEN 1
#define MT_TOOL_MAX 1
#define MT_TOOL_PALM 2
#define MT_TOOL_MAX 2
/*
* Values describing the status of a force-feedback effect

View File

@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
return 0;
}
EXPORT_SYMBOL_GPL(lcm);
unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
unsigned long l = lcm(a, b);
if (l)
return l;
return (b ? : a);
}
EXPORT_SYMBOL_GPL(lcm_not_zero);
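
lcm() returns 0 when either argument is 0, which is awkward for callers that combine two limits where 0 means "no constraint". lcm_not_zero() falls back to whichever argument is non-zero; (b ? : a) is the GCC shorthand for b ? b : a. A quick illustration of the intended behaviour, assuming a trivial gcd-based lcm in place of the kernel's:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	return (a && b) ? (a / gcd(a, b)) * b : 0;
}

static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	unsigned long l = lcm(a, b);
	return l ? l : (b ? b : a);
}

int main(void)
{
	printf("%lu\n", lcm_not_zero(512, 4096));	/* 4096 */
	printf("%lu\n", lcm_not_zero(0, 4096));		/* 4096, not 0 */
	printf("%lu\n", lcm_not_zero(512, 0));		/* 512, not 0 */
	return 0;
}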

View File

@ -2870,7 +2870,9 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
static DEFINE_PER_CPU(int, xmit_recursion);
DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);
#define RECURSION_LIMIT 10
/**

View File

@ -165,9 +165,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
spin_lock(&net->rules_mod_lock);
list_del_rcu(&ops->list);
fib_rules_cleanup_ops(ops);
spin_unlock(&net->rules_mod_lock);
fib_rules_cleanup_ops(ops);
kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

View File

@ -198,8 +198,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc)
*/
int peernet2id(struct net *net, struct net *peer)
{
int id = __peernet2id(net, peer, true);
bool alloc = atomic_read(&peer->count) == 0 ? false : true;
int id;
id = __peernet2id(net, peer, alloc);
return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
}
EXPORT_SYMBOL(peernet2id);
@ -338,7 +340,7 @@ static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp, *peer;
struct net *net, *tmp;
struct list_head net_kill_list;
LIST_HEAD(net_exit_list);
@ -354,6 +356,14 @@ static void cleanup_net(struct work_struct *work)
list_for_each_entry(net, &net_kill_list, cleanup_list) {
list_del_rcu(&net->list);
list_add_tail(&net->exit_list, &net_exit_list);
for_each_net(tmp) {
int id = __peernet2id(tmp, net, false);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
}
idr_destroy(&net->netns_ids);
}
rtnl_unlock();
@ -379,26 +389,12 @@ static void cleanup_net(struct work_struct *work)
*/
rcu_barrier();
rtnl_lock();
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
/* Unreference net from all peers (no need to loop over
* net_exit_list because idr_destroy() will be called for each
* element of this list.
*/
for_each_net(peer) {
int id = __peernet2id(peer, net, false);
if (id >= 0)
idr_remove(&peer->netns_ids, id);
}
idr_destroy(&net->netns_ids);
list_del_init(&net->exit_list);
put_user_ns(net->user_ns);
net_drop_ns(net);
}
rtnl_unlock();
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
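
Two related points in this file: peernet2id() now refuses to allocate an id for a peer whose refcount is already 0 (such a namespace is being destroyed, and pinning a fresh id for it would be wasted work at best), and cleanup_net() removes the dying namespace's ids from all peers earlier, under RTNL. A toy sketch of the lookup-with-optional-allocation shape, using a small array in place of the kernel's idr:

#include <stdio.h>

#define MAX_PEERS 8

static const void *ids[MAX_PEERS];	/* index == id, like an idr */

/* Look up peer's id; optionally allocate one if absent. */
static int peernet2id_sketch(const void *peer, int alloc)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_PEERS; i++) {
		if (ids[i] == peer)
			return i;
		if (!ids[i] && free_slot < 0)
			free_slot = i;
	}
	if (!alloc || free_slot < 0)
		return -1;
	ids[free_slot] = peer;
	return free_slot;
}

int main(void)
{
	int live = 1, dying = 2;

	printf("%d\n", peernet2id_sketch(&live, 1));	/* allocates: 0 */
	/* A namespace with refcount 0 is on its way out; looking it up
	 * must not allocate a fresh id for it. */
	printf("%d\n", peernet2id_sketch(&dying, 0));	/* -1, no alloc */
	return 0;
}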

View File

@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
sock_reset_flag(sk, bit);
}
bool sk_mc_loop(struct sock *sk)
{
if (dev_recursion_level())
return false;
if (!sk)
return true;
switch (sk->sk_family) {
case AF_INET:
return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
#endif
}
WARN_ON(1);
return true;
}
EXPORT_SYMBOL(sk_mc_loop);
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.

View File

@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void)
void __exit dn_fib_rules_cleanup(void)
{
rtnl_lock();
fib_rules_unregister(dn_fib_rules_ops);
rtnl_unlock();
rcu_barrier();
}

View File

@ -513,12 +513,10 @@ static struct net_device *dev_to_net_device(struct device *dev)
#ifdef CONFIG_OF
static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
struct dsa_chip_data *cd,
int chip_index,
int chip_index, int port_index,
struct device_node *link)
{
int ret;
const __be32 *reg;
int link_port_addr;
int link_sw_addr;
struct device_node *parent_sw;
int len;
@ -531,6 +529,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
if (!reg || (len != sizeof(*reg) * 2))
return -EINVAL;
/*
* Get the destination switch number from the second field of its 'reg'
* property, i.e. for "reg = <0x19 1>" sw_addr is '1'.
*/
link_sw_addr = be32_to_cpup(reg + 1);
if (link_sw_addr >= pd->nr_chips)
@ -547,20 +549,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
}
reg = of_get_property(link, "reg", NULL);
if (!reg) {
ret = -EINVAL;
goto out;
}
link_port_addr = be32_to_cpup(reg);
cd->rtable[link_sw_addr] = link_port_addr;
cd->rtable[link_sw_addr] = port_index;
return 0;
out:
kfree(cd->rtable);
return ret;
}
static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
@ -670,7 +661,7 @@ static int dsa_of_probe(struct device *dev)
if (!strcmp(port_name, "dsa") && link &&
pd->nr_chips > 1) {
ret = dsa_of_setup_routing_table(pd, cd,
chip_index, link);
chip_index, port_index, link);
if (ret)
goto out_free_chip;
}
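
The routing-table fix stops re-reading the link's own 'reg' property (the deleted lines) and instead uses the port index the caller already has; only the destination switch still needs its two-cell 'reg' parsed, as the new comment explains. For readers unfamiliar with device-tree cells, a standalone sketch of decoding "reg = <0x19 1>", with be32_decode standing in for the kernel's be32_to_cpup():

#include <stdint.h>
#include <stdio.h>

/* Decode one big-endian 32-bit device-tree cell. */
static uint32_t be32_decode(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* The two cells as they appear in memory for "reg = <0x19 1>;" */
	const uint8_t reg[8] = { 0, 0, 0, 0x19, 0, 0, 0, 1 };

	printf("MII address: 0x%x\n", (unsigned)be32_decode(reg));	/* 0x19 */
	printf("switch number: %u\n", (unsigned)be32_decode(reg + 4));	/* 1 */
	return 0;
}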

View File

@ -1175,13 +1175,11 @@ static void ip_fib_net_exit(struct net *net)
unsigned int i;
rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[i];
struct hlist_node *tmp;

View File

@ -276,11 +276,13 @@ static void __net_exit ipmr_rules_exit(struct net *net)
{
struct mr_table *mrt, *next;
rtnl_lock();
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list);
ipmr_free_table(mrt);
}
fib_rules_unregister(net->ipv4.mr_rules_ops);
rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
@ -306,7 +308,10 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net)
{
rtnl_lock();
ipmr_free_table(net->ipv4.mrt);
net->ipv4.mrt = NULL;
rtnl_unlock();
}
#endif

View File

@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (!first_ackt.v64)
first_ackt = last_ackt;
if (!(sacked & TCPCB_SACKED_ACKED))
if (!(sacked & TCPCB_SACKED_ACKED)) {
reord = min(pkts_acked, reord);
if (!after(scb->end_seq, tp->high_seq))
flag |= FLAG_ORIG_SACK_ACKED;
if (!after(scb->end_seq, tp->high_seq))
flag |= FLAG_ORIG_SACK_ACKED;
}
}
if (sacked & TCPCB_SACKED_ACKED)
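
The tcp_clean_rtx_queue change is a missing-braces fix: as previously written, the FLAG_ORIG_SACK_ACKED update looked, from its indentation, like part of the !(sacked & TCPCB_SACKED_ACKED) branch, but without braces it executed unconditionally. Adding the braces makes the code match its indentation. The hazard in miniature:

#include <stdio.h>

int main(void)
{
	int sacked = 1, flag = 0, reord = 0;

	/* Buggy shape: only the first statement is guarded. */
	if (!sacked)
		reord = 1;
		flag |= 2;	/* runs even when sacked != 0 */

	printf("buggy: reord=%d flag=%d\n", reord, flag);	/* flag=2 */

	flag = 0;
	/* Fixed shape: braces make the guard cover both statements. */
	if (!sacked) {
		reord = 1;
		flag |= 2;
	}
	printf("fixed: reord=%d flag=%d\n", reord, flag);	/* flag=0 */
	return 0;
}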

View File

@ -315,7 +315,9 @@ out_fib6_rules_ops:
static void __net_exit fib6_rules_net_exit(struct net *net)
{
rtnl_lock();
fib_rules_unregister(net->ipv6.fib6_rules_ops);
rtnl_unlock();
}
static struct pernet_operations fib6_rules_net_ops = {

View File

@ -542,7 +542,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct sk_buff *frag;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;

View File

@ -265,8 +265,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
list_del(&mrt->list);
ip6mr_free_table(mrt);
}
rtnl_unlock();
fib_rules_unregister(net->ipv6.mr6_rules_ops);
rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr6_table *mrt)
{
del_timer(&mrt->ipmr_expire_timer);
del_timer_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt);
kfree(mrt);
}
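
The del_timer() to del_timer_sync() switch above matters because del_timer() only deactivates a pending timer; a handler already running on another CPU keeps running, so kfree(mrt) immediately afterwards would be a use-after-free. del_timer_sync() also waits for a running handler to finish. A loose userspace analogy (compile with -lpthread), where pthread_join plays the "wait for the callback" role:

#include <pthread.h>
#include <stdio.h>

static int table[1] = { 42 };	/* stands in for the mroute table */

static void *timer_fn(void *arg)
{
	printf("callback sees %d\n", table[0]);	/* touches the table */
	return NULL;
}

int main(void)
{
	pthread_t timer_thread;

	pthread_create(&timer_thread, NULL, timer_fn, NULL);

	/* del_timer_sync() analogue: wait for the callback to finish
	 * BEFORE tearing the table down ... */
	pthread_join(timer_thread, NULL);

	/* ... only now is it safe to free/poison the data it used. */
	table[0] = -1;
	return 0;
}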

View File

@ -1871,6 +1871,7 @@ static int __init l2tp_init(void)
l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
if (!l2tp_wq) {
pr_err("alloc_workqueue failed\n");
unregister_pernet_device(&l2tp_net_ops);
rc = -ENOMEM;
goto out;
}