
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David Ahern's dump indexing bug fix in 'net' overlapped the
change of the function signature of inet6_fill_ifaddr() in
'net-next'.  Trivially resolved.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-10-21 11:54:28 -07:00
commit 21ea1d36f6
42 changed files with 308 additions and 139 deletions

View File

@@ -37,6 +37,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n

View File

@@ -389,6 +389,13 @@
* that register for the time this macro runs
*/
/*
* The high bits of the CS dword (__csh) are used for
* CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
* hardware didn't do this for us.
*/
andl $(0x0000ffff), PT_CS(%esp)
/* Are we on the entry stack? Bail out if not! */
movl PER_CPU_VAR(cpu_entry_area), %ecx
addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -407,12 +414,6 @@
/* Load top of task-stack into %edi */
movl TSS_entry2task_stack(%edi), %edi
/*
* Clear unused upper bits of the dword containing the word-sized CS
* slot in pt_regs in case hardware didn't clear it for us.
*/
andl $(0x0000ffff), PT_CS(%esp)
/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS

View File

@@ -1187,6 +1187,16 @@ ENTRY(paranoid_entry)
xorl %ebx, %ebx
1:
/*
* Always stash CR3 in %r14. This value will be restored,
* verbatim, at exit. Needed if paranoid_entry interrupted
* another entry that already switched to the user CR3 value
* but has not yet returned to userspace.
*
* This is also why CS (stashed in the "iret frame" by the
* hardware at entry) can not be used: this may be a return
* to kernel code, but with a user CR3 value.
*/
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
ret
@@ -1211,11 +1221,13 @@ ENTRY(paranoid_exit)
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
TRACE_IRQS_IRETQ
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
@@ -1626,6 +1638,7 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
testl %ebx, %ebx /* swapgs needed? */
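
For context, the contract described in the comment above can be modeled in a few lines of C. This is an illustrative sketch only, not the kernel's implementation: the accessor names and the PTI bit value are stand-ins for the real SAVE_AND_SWITCH_TO_KERNEL_CR3 / RESTORE_CR3 macros.

#define PTI_USER_BIT (1UL << 12)              /* assumed PTI page-table switch bit */

static unsigned long cr3;                     /* stand-in for the real register */
static unsigned long read_cr3(void) { return cr3; }
static void write_cr3(unsigned long v) { cr3 = v; }

static unsigned long saved_cr3;               /* the asm stashes this in %r14 */

static void paranoid_enter(void)
{
	saved_cr3 = read_cr3();                  /* may be a user CR3 value */
	write_cr3(saved_cr3 & ~PTI_USER_BIT);    /* always run on the kernel CR3 */
}

static void paranoid_exit(void)
{
	write_cr3(saved_cr3);                    /* restore verbatim, user or kernel */
}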

View File

@@ -528,7 +528,7 @@ static inline void fpregs_activate(struct fpu *fpu)
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (old_fpu->initialized) {
if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else

View File

@@ -185,22 +185,22 @@ do { \
typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(1)",%0" \
asm volatile(op "b "__percpu_arg(1)",%0"\
: "=q" (pfo_ret__) \
: "m" (var)); \
break; \
case 2: \
asm(op "w "__percpu_arg(1)",%0" \
asm volatile(op "w "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 4: \
asm(op "l "__percpu_arg(1)",%0" \
asm volatile(op "l "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 8: \
asm(op "q "__percpu_arg(1)",%0" \
asm volatile(op "q "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
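
The reason for the added "volatile", shown as a standalone user-space sketch (x86 assumed; names are hypothetical): without it, GCC may treat the asm as a pure function of its operands and hoist it out of a loop whose store it cannot see, such as one performed in interrupt context.

#include <stdio.h>

static int flag;	/* set from a context the compiler cannot see */

static int spin_until_set(void)
{
	int v;

	do {
		/*
		 * A non-volatile asm with unchanged operands may be executed
		 * once and its result reused; "volatile" forces a real load
		 * on every iteration.
		 */
		asm volatile("movl %1, %0" : "=r" (v) : "m" (flag));
	} while (!v);
	return v;
}

int main(void)
{
	flag = 1;	/* trivial driver so the sketch terminates */
	printf("%d\n", spin_until_set());
	return 0;
}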

View File

@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
* thread's fpu state, reconstruct fxstate from the fsave
* header. Validate and sanitize the copied state.
*/
struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;

View File

@@ -42,10 +42,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
int __init pci_swiotlb_detect_4gb(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
swiotlb = 1;
#endif
/*
* If SME is active then swiotlb will be set to 1 so that bounce

View File

@@ -25,7 +25,7 @@
#include <asm/time.h>
#ifdef CONFIG_X86_64
__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
#endif
unsigned long profile_pc(struct pt_regs *regs)

View File

@@ -58,7 +58,7 @@ struct cyc2ns {
static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
void cyc2ns_read_begin(struct cyc2ns_data *data)
void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
{
int seq, idx;
@@ -75,7 +75,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}
void cyc2ns_read_end(void)
void __always_inline cyc2ns_read_end(void)
{
preempt_enable_notrace();
}
@@ -104,7 +104,7 @@ void cyc2ns_read_end(void)
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
struct cyc2ns_data data;
unsigned long long ns;

View File

@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
{
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
unsigned int granularity;
unsigned int op;
int alignment;
sector_t bs_mask;
if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
while (nr_sects) {
unsigned int req_sects;
sector_t end_sect, tmp;
unsigned int req_sects = nr_sects;
sector_t end_sect;
/*
* Issue in chunks of the user defined max discard setting,
* ensuring that bi_size doesn't overflow
*/
req_sects = min_t(sector_t, nr_sects,
q->limits.max_discard_sectors);
if (!req_sects)
goto fail;
if (req_sects > UINT_MAX >> 9)
req_sects = UINT_MAX >> 9;
/*
* If splitting a request, and the next starting sector would be
* misaligned, stop the discard at the previous aligned sector.
*/
end_sect = sector + req_sects;
tmp = end_sect;
if (req_sects < nr_sects &&
sector_div(tmp, granularity) != alignment) {
end_sect = end_sect - alignment;
sector_div(end_sect, granularity);
end_sect = end_sect * granularity + alignment;
req_sects = end_sect - sector;
}
bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;

View File

@@ -174,6 +174,11 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->crtcs[i].state = NULL;
state->crtcs[i].old_state = NULL;
state->crtcs[i].new_state = NULL;
if (state->crtcs[i].commit) {
drm_crtc_commit_put(state->crtcs[i].commit);
state->crtcs[i].commit = NULL;
}
}
for (i = 0; i < config->num_total_plane; i++) {

View File

@@ -1408,15 +1408,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
struct drm_crtc_commit *commit = new_crtc_state->commit;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
int ret;
if (!commit)
crtc = old_state->crtcs[i].ptr;
if (!crtc || !commit)
continue;
ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
@@ -1934,6 +1935,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
drm_crtc_commit_get(commit);
commit->abort_completion = true;
state->crtcs[i].commit = commit;
drm_crtc_commit_get(commit);
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {

View File

@@ -81,9 +81,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
int i;
for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
unsigned long ideal = rate * i;
u64 ideal = (u64)rate * i;
unsigned long rounded;
/*
* ideal has overflowed the max value that can be stored in an
* unsigned long, and every clk operation we might do on a
* truncated u64 value will give us incorrect results.
* Let's just stop there since bigger dividers will result in
* the same overflow issue.
*/
if (ideal > ULONG_MAX)
goto out;
rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
ideal);
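
The failure mode the new check guards against, as a standalone sketch with illustrative values: on a 32-bit architecture, unsigned long is 32 bits wide and rate * i silently wraps.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long rate = 148500000UL;	/* e.g. a 148.5 MHz dot clock */
	int i = 64;				/* a plausible divider */

	unsigned long truncated = rate * i;	/* wraps where ULONG_MAX == 2^32 - 1 */
	uint64_t ideal = (uint64_t)rate * i;	/* the fixed computation */

	if (ideal > ULONG_MAX)			/* the new bail-out condition */
		printf("stop: %llu does not fit (wrapped to %lu)\n",
		       (unsigned long long)ideal, truncated);
	return 0;
}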

View File

@@ -1346,6 +1346,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN0622", 0 },
{ "ELAN1000", 0 },

View File

@@ -3143,8 +3143,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
}
mutex_lock(&ns->ctrl->subsys->lock);
nvme_mpath_clear_current_path(ns);
list_del_rcu(&ns->siblings);
nvme_mpath_clear_current_path(ns);
mutex_unlock(&ns->ctrl->subsys->lock);
down_write(&ns->ctrl->namespaces_rwsem);

View File

@@ -310,17 +310,17 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
if (difference & ACM_CTRL_DSR)
acm->iocount.dsr++;
if (difference & ACM_CTRL_BRK)
acm->iocount.brk++;
if (difference & ACM_CTRL_RI)
acm->iocount.rng++;
if (difference & ACM_CTRL_DCD)
acm->iocount.dcd++;
if (difference & ACM_CTRL_FRAMING)
if (newctrl & ACM_CTRL_BRK)
acm->iocount.brk++;
if (newctrl & ACM_CTRL_RI)
acm->iocount.rng++;
if (newctrl & ACM_CTRL_FRAMING)
acm->iocount.frame++;
if (difference & ACM_CTRL_PARITY)
if (newctrl & ACM_CTRL_PARITY)
acm->iocount.parity++;
if (difference & ACM_CTRL_OVERRUN)
if (newctrl & ACM_CTRL_OVERRUN)
acm->iocount.overrun++;
spin_unlock_irqrestore(&acm->read_lock, flags);
@@ -355,7 +355,6 @@ static void acm_ctrl_irq(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
acm->nb_index = 0;
dev_dbg(&acm->control->dev,
"%s - urb shutting down with status: %d\n",
__func__, status);
@@ -1642,6 +1641,7 @@ static int acm_pre_reset(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
clear_bit(EVENT_RX_STALL, &acm->flags);
acm->nb_index = 0; /* pending control transfers are lost */
return 0;
}

View File

@@ -1474,8 +1474,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
u = 0;
switch (uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
if (is_in)
allow_short = true;
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
/* min 8 byte setup packet */
@@ -1505,6 +1503,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
if (is_in)
allow_short = true;
snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",

View File

@@ -221,6 +221,8 @@
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
#include <linux/nospec.h>
#include "configfs.h"
@@ -3152,6 +3154,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
fsg_opts = to_fsg_opts(&group->cg_item);
if (num >= FSG_MAX_LUNS)
return ERR_PTR(-ERANGE);
num = array_index_nospec(num, FSG_MAX_LUNS);
mutex_lock(&fsg_opts->lock);
if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
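
What array_index_nospec() buys here, sketched standalone: after the bounds check, the index is clamped with branch-free arithmetic so that a mispredicted check cannot speculatively index luns[] out of range. This is modeled loosely on the generic fallback; the real helper in <linux/nospec.h> may use arch-specific code, and the sketch relies on arithmetic right shift of negative values (GCC/Clang behavior, as the kernel does).

#include <stdio.h>

/* all-ones when index < size, zero otherwise; no conditional branch */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	return index & index_mask(index, size);
}

int main(void)
{
	printf("%lu %lu\n", index_nospec(3, 8), index_nospec(9, 8));	/* 3 0 */
	return 0;
}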

View File

@@ -179,10 +179,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||

View File

@@ -161,6 +161,8 @@ static int intel_xhci_usb_remove(struct platform_device *pdev)
{
struct intel_xhci_usb_data *data = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
usb_role_switch_unregister(data->role_sw);
return 0;
}

View File

@@ -318,8 +318,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
struct vhci_hcd *vhci_hcd;
struct vhci *vhci;
int retval = 0;
int rhport;
int rhport = -1;
unsigned long flags;
bool invalid_rhport = false;
u32 prev_port_status[VHCI_HC_PORTS];
@@ -334,9 +335,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
wIndex);
if (wIndex > VHCI_HC_PORTS)
pr_err("invalid port number %d\n", wIndex);
rhport = wIndex - 1;
/*
* wIndex can be 0 for some request types (typeReq). rhport is
* in valid range when wIndex >= 1 and wIndex <= VHCI_HC_PORTS.
*
* Reference port_status[] only with valid rhport when
* invalid_rhport is false.
*/
if (wIndex < 1 || wIndex > VHCI_HC_PORTS) {
invalid_rhport = true;
if (wIndex > VHCI_HC_PORTS)
pr_err("invalid port number %d\n", wIndex);
} else
rhport = wIndex - 1;
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
@@ -345,8 +356,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* store old status and compare now and old later */
if (usbip_dbg_flag_vhci_rh) {
memcpy(prev_port_status, vhci_hcd->port_status,
sizeof(prev_port_status));
if (!invalid_rhport)
memcpy(prev_port_status, vhci_hcd->port_status,
sizeof(prev_port_status));
}
switch (typeReq) {
@@ -354,8 +366,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(" ClearHubFeature\n");
break;
case ClearPortFeature:
if (rhport < 0)
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (hcd->speed == HCD_USB3) {
@@ -415,9 +429,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetPortStatus:
usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
if (wIndex < 1) {
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
retval = -EPIPE;
goto error;
}
/* we do not care about resume. */
@@ -513,16 +528,20 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
if (rhport < 0)
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
break;
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_POWER\n");
if (rhport < 0)
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
if (hcd->speed == HCD_USB3)
vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
else
@@ -531,8 +550,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_BH_PORT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
if (rhport < 0)
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
/* Applicable only for USB3.0 hub */
if (hcd->speed != HCD_USB3) {
pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -543,8 +564,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_RESET\n");
if (rhport < 0)
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
vhci_hcd->port_status[rhport] = 0;
@@ -565,8 +588,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
wValue);
if (rhport < 0)
if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
goto error;
}
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
@@ -608,7 +633,7 @@ error:
if (usbip_dbg_flag_vhci_rh) {
pr_debug("port %d\n", rhport);
/* Only dump valid port status */
if (rhport >= 0) {
if (!invalid_rhport) {
dump_port_status_diff(prev_port_status[rhport],
vhci_hcd->port_status[rhport],
hcd->speed == HCD_USB3);
@@ -618,8 +643,10 @@ error:
spin_unlock_irqrestore(&vhci->lock, flags);
if ((vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0)
if (!invalid_rhport &&
(vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0) {
usb_hcd_poll_rh_status(hcd);
}
return retval;
}
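
The repeated checks above all reduce to one validation rule; a hypothetical standalone helper makes the contract explicit (names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define NPORTS 8	/* stand-in for VHCI_HC_PORTS */

/*
 * wIndex 0 is legal for hub-level requests, so it cannot name a port;
 * only 1..NPORTS map to a port_status[] slot.
 */
static bool parse_rhport(unsigned int wIndex, int *rhport)
{
	if (wIndex < 1 || wIndex > NPORTS)
		return false;	/* caller must not touch port_status[] */
	*rhport = wIndex - 1;	/* 0 .. NPORTS - 1 */
	return true;
}

int main(void)
{
	int rhport;

	printf("%d\n", parse_rhport(0, &rhport));	/* 0: hub-level request */
	if (parse_rhport(3, &rhport))
		printf("rhport=%d\n", rhport);		/* rhport=2 */
	return 0;
}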

View File

@@ -153,6 +153,17 @@ struct __drm_planes_state {
struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state, *old_state, *new_state;
/**
* @commit:
*
* A reference to the CRTC commit object that is kept for use by
* drm_atomic_helper_wait_for_flip_done() after
* drm_atomic_helper_commit_hw_done() is called. This ensures that a
* concurrent commit won't free a commit object that is still in use.
*/
struct drm_crtc_commit *commit;
s32 __user *out_fence_ptr;
u64 last_vblank_count;
};

View File

@@ -4001,7 +4001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* put back on, and if we advance min_vruntime, we'll be placed back
* further than we started -- ie. we'll be penalized.
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
}
@@ -4476,9 +4476,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
/*
* Add to the _head_ of the list, so that an already-started
* distribute_cfs_runtime will not see us
* distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
* not running, add to the tail so that later runqueues don't get starved.
*/
list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
if (cfs_b->distribute_running)
list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
else
list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
/*
* If we're the first throttled task, make sure the bandwidth
@@ -4622,14 +4626,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
* in us over-using our runtime if it is all used during this loop, but
* only by limited amounts in that extreme case.
*/
while (throttled && cfs_b->runtime > 0) {
while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
cfs_b->distribute_running = 1;
raw_spin_unlock(&cfs_b->lock);
/* we can't nest cfs_b->lock while distributing bandwidth */
runtime = distribute_cfs_runtime(cfs_b, runtime,
runtime_expires);
raw_spin_lock(&cfs_b->lock);
cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4740,6 +4746,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
/* confirm we're still not at a refresh boundary */
raw_spin_lock(&cfs_b->lock);
if (cfs_b->distribute_running) {
raw_spin_unlock(&cfs_b->lock);
return;
}
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
raw_spin_unlock(&cfs_b->lock);
return;
@@ -4749,6 +4760,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
runtime = cfs_b->runtime;
expires = cfs_b->runtime_expires;
if (runtime)
cfs_b->distribute_running = 1;
raw_spin_unlock(&cfs_b->lock);
if (!runtime)
@@ -4759,6 +4773,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
raw_spin_lock(&cfs_b->lock);
if (expires == cfs_b->runtime_expires)
cfs_b->runtime -= min(runtime, cfs_b->runtime);
cfs_b->distribute_running = 0;
raw_spin_unlock(&cfs_b->lock);
}
@@ -4867,6 +4882,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b->period_timer.function = sched_cfs_period_timer;
hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cfs_b->slack_timer.function = sched_cfs_slack_timer;
cfs_b->distribute_running = 0;
}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
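
A user-space analogue of the race the new distribute_running flag closes (a pthread sketch under simplified assumptions, not the scheduler code): the period timer and the slack timer both drop the bandwidth lock while walking the throttled list, so without the flag they can distribute the same runtime twice.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool distribute_running;
static long runtime = 100;

static long distribute(long budget)
{
	/* hands runtime to throttled queues; must run without "lock" held */
	return budget;
}

static void timer_fires(void)
{
	long budget, used;

	pthread_mutex_lock(&lock);
	if (distribute_running) {		/* another timer is mid-walk */
		pthread_mutex_unlock(&lock);
		return;
	}
	distribute_running = true;
	budget = runtime;
	pthread_mutex_unlock(&lock);

	used = distribute(budget);		/* lock dropped, as in the kernel */

	pthread_mutex_lock(&lock);
	distribute_running = false;
	runtime -= used < runtime ? used : runtime;
	pthread_mutex_unlock(&lock);
}

int main(void) { timer_fires(); return 0; }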

View File

@@ -346,6 +346,8 @@ struct cfs_bandwidth {
int nr_periods;
int nr_throttled;
u64 throttled_time;
bool distribute_running;
#endif
};

View File

@@ -738,16 +738,30 @@ static void free_synth_field(struct synth_field *field)
kfree(field);
}
static struct synth_field *parse_synth_field(char *field_type,
char *field_name)
static struct synth_field *parse_synth_field(int argc, char **argv,
int *consumed)
{
struct synth_field *field;
const char *prefix = NULL;
char *field_type = argv[0], *field_name;
int len, ret = 0;
char *array;
if (field_type[0] == ';')
field_type++;
if (!strcmp(field_type, "unsigned")) {
if (argc < 3)
return ERR_PTR(-EINVAL);
prefix = "unsigned ";
field_type = argv[1];
field_name = argv[2];
*consumed = 3;
} else {
field_name = argv[1];
*consumed = 2;
}
len = strlen(field_name);
if (field_name[len - 1] == ';')
field_name[len - 1] = '\0';
@@ -760,11 +774,15 @@ static struct synth_field *parse_synth_field(char *field_type,
array = strchr(field_name, '[');
if (array)
len += strlen(array);
if (prefix)
len += strlen(prefix);
field->type = kzalloc(len, GFP_KERNEL);
if (!field->type) {
ret = -ENOMEM;
goto free;
}
if (prefix)
strcat(field->type, prefix);
strcat(field->type, field_type);
if (array) {
strcat(field->type, array);
@@ -1009,7 +1027,7 @@ static int create_synth_event(int argc, char **argv)
struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
struct synth_event *event = NULL;
bool delete_event = false;
int i, n_fields = 0, ret = 0;
int i, consumed = 0, n_fields = 0, ret = 0;
char *name;
mutex_lock(&synth_event_mutex);
@@ -1061,16 +1079,16 @@ static int create_synth_event(int argc, char **argv)
goto err;
}
field = parse_synth_field(argv[i], argv[i + 1]);
field = parse_synth_field(argc - i, &argv[i], &consumed);
if (IS_ERR(field)) {
ret = PTR_ERR(field);
goto err;
}
fields[n_fields] = field;
i++; n_fields++;
fields[n_fields++] = field;
i += consumed - 1;
}
if (i < argc) {
if (i < argc && strcmp(argv[i], ";") != 0) {
ret = -EINVAL;
goto err;
}
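
A standalone sketch of the new multi-token handling (simplified: the real parser also strips ';' and handles array types): "unsigned" is only valid as a prefix, so such a field consumes three argv slots instead of two and reports that through *consumed.

#include <stdio.h>
#include <string.h>

static int parse_field(int argc, char **argv, int *consumed)
{
	const char *prefix = "", *type = argv[0], *name;

	if (!strcmp(type, "unsigned")) {
		if (argc < 3)
			return -1;	/* "unsigned" alone is not a type */
		prefix = "unsigned ";
		type = argv[1];
		name = argv[2];
		*consumed = 3;
	} else {
		if (argc < 2)
			return -1;
		name = argv[1];
		*consumed = 2;
	}
	printf("type=\"%s%s\" name=\"%s\" consumed=%d\n",
	       prefix, type, name, *consumed);
	return 0;
}

int main(void)
{
	char *field[] = { "unsigned", "long", "var1" };
	int consumed;

	parse_field(3, field, &consumed);	/* the caller then advances i by consumed - 1 */
	return 0;
}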

View File

@@ -1167,8 +1167,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh->nud_state = new;
err = 0;
notify = old & NUD_VALID;
if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
(flags & NEIGH_UPDATE_F_ADMIN)) &&
if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
(new & NUD_FAILED)) {
neigh_invalidate(neigh);
notify = 1;

View File

@@ -1846,8 +1846,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
if (skb->ip_summed == CHECKSUM_COMPLETE) {
int delta = skb->len - len;
skb->csum = csum_sub(skb->csum,
skb_checksum(skb, len, delta, 0));
skb->csum = csum_block_sub(skb->csum,
skb_checksum(skb, len, delta, 0),
len);
}
return __pskb_trim(skb, len);
}
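
Why plain csum_sub() was wrong here, in a standalone model of 32-bit one's-complement checksums (assumes the kernel convention that a block starting at an odd byte offset contributes byte-rotated, which csum_block_sub() compensates for): the trimmed tail starts at offset len, so for odd len the old code subtracted a byte-swapped value.

#include <stdint.h>
#include <stdio.h>

static uint32_t fold(uint64_t s)
{
	while (s >> 32)			/* wrap carries back in, one's complement */
		s = (s & 0xffffffff) + (s >> 32);
	return (uint32_t)s;
}

static uint32_t csum_add32(uint32_t a, uint32_t b)
{
	return fold((uint64_t)a + b);
}

/* subtract a partial checksum for bytes that started at "offset" */
static uint32_t block_sub(uint32_t csum, uint32_t csum2, int offset)
{
	if (offset & 1)			/* odd start: the block's sum is rotated */
		csum2 = (csum2 >> 8) | (csum2 << 24);
	return csum_add32(csum, ~csum2);	/* one's-complement subtraction */
}

int main(void)
{
	printf("%08x\n", block_sub(0x11223344, 0x5566, 3));
	return 0;
}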

View File

@@ -4972,12 +4972,14 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
/* unicast address incl. temp addr */
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (++ip_idx < s_ip_idx)
continue;
if (ip_idx < s_ip_idx)
goto next;
err = inet6_fill_ifaddr(skb, ifa, fillargs);
if (err < 0)
break;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
next:
ip_idx++;
}
break;
}
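
The pattern behind the fix, standalone (hypothetical names): every entry must advance the index exactly once, whether it is skipped, emitted, or aborted on; otherwise the position saved for the next dump pass drifts. This is the same loop shape as the fixed in6_dump_addrs() above.

#include <stdio.h>

static void emit(int item) { printf("%d\n", item); }

/* returns the index to resume from on the next invocation */
static int dump(const int *items, int n, int saved_idx, int budget)
{
	int idx;

	for (idx = 0; idx < n; idx++) {
		if (idx < saved_idx)
			continue;	/* sent in an earlier pass; still counted */
		if (budget-- == 0)
			break;		/* buffer full: resume at idx later */
		emit(items[idx]);
	}
	return idx;
}

int main(void)
{
	int items[] = { 10, 11, 12, 13, 14 };
	int resume = dump(items, 5, 0, 2);	/* emits 10, 11; resume == 2 */

	dump(items, 5, resume, 8);		/* emits 12, 13, 14 */
	return 0;
}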

View File

@@ -377,6 +377,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002

View File

@@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_HPAGE_1M 156
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
#define KVM_CAP_MSR_PLATFORM_INFO 159
#ifdef KVM_CAP_IRQ_ROUTING

View File

@@ -36,7 +36,7 @@ static const char *tracing_path_tracefs_mount(void)
__tracing_path_set("", mnt);
return mnt;
return tracing_path;
}
static const char *tracing_path_debugfs_mount(void)
@@ -49,7 +49,7 @@ static const char *tracing_path_debugfs_mount(void)
__tracing_path_set("tracing/", mnt);
return mnt;
return tracing_path;
}
const char *tracing_path_mount(void)

View File

@@ -833,7 +833,7 @@ ifndef NO_JVMTI
JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
else
ifneq (,$(wildcard /usr/sbin/alternatives))
JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
endif
endif
ifndef JDIR

View File

@@ -635,7 +635,7 @@ $(LIBPERF_IN): prepare FORCE
$(LIB_FILE): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
$(LIBTRACEEVENT): FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a

View File

@@ -981,6 +981,7 @@ int cmd_report(int argc, const char **argv)
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
.event_update = perf_event__process_event_update,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,

View File

@@ -188,7 +188,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
"Filter": "filter_band0=1200",
"Filter": "filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -199,7 +199,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
"Filter": "filter_band1=2000",
"Filter": "filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -210,7 +210,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
"Filter": "filter_band2=3000",
"Filter": "filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -221,7 +221,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
"Filter": "filter_band3=4000",
"Filter": "filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
@@ -232,7 +232,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band0=1200",
"Filter": "edge=1,filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -243,7 +243,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band1=2000",
"Filter": "edge=1,filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -254,7 +254,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band2=4000",
"Filter": "edge=1,filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -265,7 +265,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band3=4000",
"Filter": "edge=1,filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",

View File

@@ -187,7 +187,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
"Filter": "filter_band0=1200",
"Filter": "filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -198,7 +198,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
"Filter": "filter_band1=2000",
"Filter": "filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -209,7 +209,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
"Filter": "filter_band2=3000",
"Filter": "filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -220,7 +220,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
"Filter": "filter_band3=4000",
"Filter": "filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
@@ -231,7 +231,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band0=1200",
"Filter": "edge=1,filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -242,7 +242,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band1=2000",
"Filter": "edge=1,filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -253,7 +253,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band2=4000",
"Filter": "edge=1,filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -264,7 +264,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
"Filter": "edge=1,filter_band3=4000",
"Filter": "edge=1,filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",

View File

@@ -1081,6 +1081,7 @@ void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max
}
*size += sizeof(struct cpu_map_data);
*size = PERF_ALIGN(*size, sizeof(u64));
return zalloc(*size);
}
@@ -1560,26 +1561,9 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return NULL;
}
try_again:
al->map = map_groups__find(mg, al->addr);
if (al->map == NULL) {
/*
* If this is outside of all known maps, and is a negative
* address, try to look it up in the kernel dso, as it might be
* a vsyscall or vdso (which executes in user-mode).
*
* XXX This is nasty, we should have a symbol list in the
* "[vdso]" dso, but for now lets use the old trick of looking
* in the whole kernel symbol list.
*/
if (cpumode == PERF_RECORD_MISC_USER && machine &&
mg != &machine->kmaps &&
machine__kernel_ip(machine, al->addr)) {
mg = &machine->kmaps;
load_map = true;
goto try_again;
}
} else {
if (al->map != NULL) {
/*
* Kernel maps might be changed when loading symbols so loading
* must be done prior to using kernel maps.

View File

@@ -1089,6 +1089,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->exclude_user = 1;
}
if (evsel->own_cpus)
evsel->attr.read_format |= PERF_FORMAT_ID;
/*
* Apply event specific term settings,
* it overloads any global configuration.

View File

@@ -930,13 +930,14 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
static __u64 pmu_format_max_value(const unsigned long *format)
{
__u64 w = 0;
int fbit;
int w;
for_each_set_bit(fbit, format, PERF_PMU_FORMAT_BITS)
w |= (1ULL << fbit);
return w;
w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
if (!w)
return 0;
if (w < 64)
return (1ULL << w) - 1;
return -1;
}
/*

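The new computation treats the format spec as a field width rather than a positional mask: a format such as config:0-7,32-35 has 12 usable bits, so the largest accepted value is 2^12 - 1 once it is scattered into those bit positions by pmu_format_value(). A standalone sketch of the assumed semantics:

#include <stdint.h>
#include <stdio.h>

static uint64_t max_value_for_width(int w)	/* w = popcount of the format bits */
{
	if (w <= 0)
		return 0;
	if (w < 64)
		return (UINT64_C(1) << w) - 1;
	return UINT64_MAX;			/* all 64 bits are usable */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)max_value_for_width(12));	/* 4095 */
	return 0;
}
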
View File

@@ -85,6 +85,9 @@ static struct symbol *new_inline_sym(struct dso *dso,
struct symbol *inline_sym;
char *demangled = NULL;
if (!funcname)
funcname = "??";
if (dso) {
demangled = dso__demangle_sym(dso, 0, funcname);
if (demangled)

View File

@@ -141,6 +141,10 @@ echo "Import devices from localhost - should work"
src/usbip attach -r localhost -b $busid;
echo "=============================================================="
# Wait for sysfs file to be updated. Without this sleep, usbip port
# shows no imported devices.
sleep 3;
echo "List imported devices - expect to see imported devices";
src/usbip port;
echo "=============================================================="

View File

@@ -0,0 +1,80 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic_events syntax parser
do_reset() {
reset_trigger
echo > set_event
clear_trace
}
fail() { #msg
do_reset
echo $1
exit_fail
}
if [ ! -f set_event ]; then
echo "event tracing is not supported"
exit_unsupported
fi
if [ ! -f synthetic_events ]; then
echo "synthetic event is not supported"
exit_unsupported
fi
reset_tracer
do_reset
echo "Test synthetic_events syntax parser"
echo > synthetic_events
# synthetic event must have a field
! echo "myevent" >> synthetic_events
echo "myevent u64 var1" >> synthetic_events
# synthetic event must be found in synthetic_events
grep "myevent[[:space:]]u64 var1" synthetic_events
# it is not possible to add same name event
! echo "myevent u64 var2" >> synthetic_events
# Non-append open will cleanup all events and add new one
echo "myevent u64 var2" > synthetic_events
# multiple fields with different spaces
echo "myevent u64 var1; u64 var2;" > synthetic_events
grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
echo "myevent u64 var1 ; u64 var2 ;" > synthetic_events
grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
echo "myevent u64 var1 ;u64 var2" > synthetic_events
grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
# test field types
echo "myevent u32 var" > synthetic_events
echo "myevent u16 var" > synthetic_events
echo "myevent u8 var" > synthetic_events
echo "myevent s64 var" > synthetic_events
echo "myevent s32 var" > synthetic_events
echo "myevent s16 var" > synthetic_events
echo "myevent s8 var" > synthetic_events
echo "myevent char var" > synthetic_events
echo "myevent int var" > synthetic_events
echo "myevent long var" > synthetic_events
echo "myevent pid_t var" > synthetic_events
echo "myevent unsigned char var" > synthetic_events
echo "myevent unsigned int var" > synthetic_events
echo "myevent unsigned long var" > synthetic_events
grep "myevent[[:space:]]unsigned long var" synthetic_events
# test string type
echo "myevent char var[10]" > synthetic_events
grep "myevent[[:space:]]char\[10\] var" synthetic_events
do_reset
exit 0