
Merge tag 'v5.4.70' into 5.4-2.2.x-imx

This is the 5.4.70 stable release

-----BEGIN PGP SIGNATURE-----

 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl99WdwACgkQONu9yGCS
 aT4X0hAAiyB6vsAHKTdVF+lDc2phpblY7ryi/Pe56X3ie+aqZqd/SjVQ4MhZ4lO1
 p4JjUjHPByHon2DrMlvE4cVf45LF1qpu2qrGkes/WaLX4OgeAsWPq/i31aks4S7h
 JebCkX9UeVTLZMZ1beeqfRgsWUX75P8vhafyl5eLC5dJXzzL3G4V9Kz+LUKBuuHU
 FoEmJHab2olfk1G2wgb9xOlmkeKt1xLBbfW8grv5c4zWQexiXJc+6M8CsQVdErLe
 eK0XDbBcUMNpcCKFRyJ1NO/Y94Yui0YPQQziSHuSR+E+1PDd9roI+DbgInC82R9t
 aO0XTnt+9mqxpuYZNHhwa/KHOg/rv/t2Y4GFySOUwaOBGtGRWVRgJfH1AoZu+rdk
 OWamt8c5Uej8CpPtoXVLNblmnpPKavUd6dox8CyDGN/PPEsk0VoXvENZMjXaAA9Q
 L0AaKdHnk+JK5HCou5vuw1AhoItB/jbldU7qy7cprZXDS7tEuGXVldRJkU5yVWyI
 Z4/+ldQOAGSrgvEZz6DxxpQ/RJO1+ai/pJJXXcRu5JghlgnZHrrg1i2/EEMnCNy+
 Kd/aXReVwiVX6wozrkOyeyymQaLe8wYeFWrc6vx0Z0L5cw7LVV7Oc0WflRXO0rq8
 WN4qmmmL6URFwcVhHlaG8AHlaMfD/yawyb7bRIbxXwt2lHe5G1k=
 =JxXq
 -----END PGP SIGNATURE-----

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>

Branch: 5.4-rM2-2.2.x-imx-squashed
Author: Andrey Zhizhikin
Date:   2020-10-07 08:37:06 +00:00
Commit: a17bf4eda5

66 changed files with 570 additions and 420 deletions

@@ -99,7 +99,7 @@ Field 10 -- # of milliseconds spent doing I/Os
 Since 5.0 this field counts jiffies when at least one request was
 started or completed. If request runs more than 2 jiffies then some
-I/O time will not be accounted unless there are other requests.
+I/O time might be not accounted in case of concurrent requests.

 Field 11 -- weighted # of milliseconds spent doing I/Os
 This field is incremented at each I/O start, I/O completion, I/O
@@ -133,6 +133,9 @@ are summed (possibly overflowing the unsigned long variable they are
 summed to) and the result given to the user. There is no convenient
 user interface for accessing the per-CPU counters themselves.

+Since 4.19 request times are measured with nanoseconds precision and
+truncated to milliseconds before showing in this interface.
+
 Disks vs Partitions
 -------------------
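
Illustration only, not part of the patch: Field 10 described above is the io_ticks column of /proc/diskstats. A minimal user-space C sketch that prints it; the device name "sda" is an arbitrary assumption, and trailing columns (newer kernels add more) are skipped.

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/diskstats", "r");
        char name[64];
        unsigned long long field[11];
        int major, minor, c;

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* columns: major minor name, then Fields 1..11 as documented above */
        while (fscanf(f, "%d %d %63s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
                      &major, &minor, name,
                      &field[0], &field[1], &field[2], &field[3], &field[4],
                      &field[5], &field[6], &field[7], &field[8], &field[9],
                      &field[10]) == 14) {
                if (!strcmp(name, "sda"))
                        printf("sda io_ticks (Field 10): %llu ms\n", field[9]);
                /* discard any extra columns on this line */
                while ((c = fgetc(f)) != '\n' && c != EOF)
                        ;
        }
        fclose(f);
        return 0;
}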

@@ -20,8 +20,9 @@ Required properties:
 - gpio-controller : Marks the device node as a GPIO controller
 - interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt
 - interrupt-controller : Mark the GPIO controller as an interrupt-controller
-- ngpios : number of GPIO lines, see gpio.txt
-  (should be multiple of 8, up to 80 pins)
+- ngpios : number of *hardware* GPIO lines, see gpio.txt. This will expose
+  2 software GPIOs per hardware GPIO: one for hardware input, one for hardware
+  output. Up to 80 pins, must be a multiple of 8.
 - clocks : A phandle to the APB clock for SGPM clock division
 - bus-frequency : SGPM CLK frequency

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 69
+SUBLEVEL = 70
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

@@ -518,7 +518,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
-                                MEMMAP_EARLY, NULL);
+                                MEMINIT_EARLY, NULL);
        return 0;
 }
@@ -527,8 +527,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
 {
        if (!vmem_map) {
-               memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
-                                NULL);
+               memmap_init_zone(size, nid, zone, start_pfn,
+                                MEMINIT_EARLY, NULL);
        } else {
                struct page *start;
                struct memmap_init_callback_data args;

@@ -1754,14 +1754,14 @@ defer:
                schedule_work(&bio_dirty_work);
 }

-void update_io_ticks(struct hd_struct *part, unsigned long now)
+void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
 {
        unsigned long stamp;
 again:
        stamp = READ_ONCE(part->stamp);
        if (unlikely(stamp != now)) {
                if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
-                       __part_stat_add(part, io_ticks, 1);
+                       __part_stat_add(part, io_ticks, end ? now - stamp : 1);
                }
        }
        if (part->partno) {
@@ -1777,7 +1777,7 @@ void generic_start_io_acct(struct request_queue *q, int op,
        part_stat_lock();

-       update_io_ticks(part, jiffies);
+       update_io_ticks(part, jiffies, false);
        part_stat_inc(part, ios[sgrp]);
        part_stat_add(part, sectors[sgrp], sectors);
        part_inc_in_flight(q, part, op_is_write(op));
@@ -1795,7 +1795,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
        part_stat_lock();

-       update_io_ticks(part, now);
+       update_io_ticks(part, now, true);
        part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
        part_stat_add(part, time_in_queue, duration);
        part_dec_in_flight(q, part, op_is_write(req_op));
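
Illustration only, not part of the patch: the update_io_ticks() hunks above change what is credited to io_ticks on I/O completion. A self-contained C sketch of just that accounting rule, with plain integers standing in for the kernel's per-cpu counters and cmpxchg():

#include <stdbool.h>
#include <stdio.h>

static unsigned long stamp, io_ticks;

/* simulated accounting rule: on end, credit the whole gap since the stamp */
static void update_io_ticks(unsigned long now, bool end)
{
        if (stamp != now) {
                io_ticks += end ? now - stamp : 1;
                stamp = now;
        }
}

int main(void)
{
        update_io_ticks(100, false);    /* request starts at jiffy 100 */
        update_io_ticks(110, true);     /* completes at jiffy 110 */
        /* the old rule would add only 1 at completion; the new rule adds
         * the 10-jiffy gap, so a lone slow request is no longer lost */
        printf("io_ticks = %lu\n", io_ticks);   /* prints 11 */
        return 0;
}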

@@ -1334,7 +1334,7 @@ void blk_account_io_done(struct request *req, u64 now)
                part_stat_lock();
                part = req->part;

-               update_io_ticks(part, jiffies);
+               update_io_ticks(part, jiffies, true);
                part_stat_inc(part, ios[sgrp]);
                part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
                part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
@@ -1376,7 +1376,7 @@ void blk_account_io_start(struct request *rq, bool new_io)
                        rq->part = part;
                }

-               update_io_ticks(part, jiffies);
+               update_io_ticks(part, jiffies, false);
                part_stat_unlock();
 }

@@ -758,41 +758,10 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
        return pfn_to_nid(pfn);
 }

-/* register memory section under specified node if it spans that node */
-static int register_mem_sect_under_node(struct memory_block *mem_blk,
-                                       void *arg)
+static int do_register_memory_block_under_node(int nid,
+                                              struct memory_block *mem_blk)
 {
-       unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
-       unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
-       unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
-       int ret, nid = *(int *)arg;
-       unsigned long pfn;
+       int ret;

-       for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
-               int page_nid;
-
-               /*
-                * memory block could have several absent sections from start.
-                * skip pfn range from absent section
-                */
-               if (!pfn_present(pfn)) {
-                       pfn = round_down(pfn + PAGES_PER_SECTION,
-                                        PAGES_PER_SECTION) - 1;
-                       continue;
-               }
-
-               /*
-                * We need to check if page belongs to nid only for the boot
-                * case, during hotplug we know that all pages in the memory
-                * block belong to the same node.
-                */
-               if (system_state == SYSTEM_BOOTING) {
-                       page_nid = get_nid_for_pfn(pfn);
-                       if (page_nid < 0)
-                               continue;
-                       if (page_nid != nid)
-                               continue;
-               }
        /*
         * If this memory block spans multiple nodes, we only indicate
@@ -810,10 +779,58 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
                                  &node_devices[nid]->dev.kobj,
                                  kobject_name(&node_devices[nid]->dev.kobj));
 }

+/* register memory section under specified node if it spans that node */
+static int register_mem_block_under_node_early(struct memory_block *mem_blk,
+                                              void *arg)
+{
+       unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+       unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+       unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
+       int nid = *(int *)arg;
+       unsigned long pfn;
+
+       for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+               int page_nid;
+
+               /*
+                * memory block could have several absent sections from start.
+                * skip pfn range from absent section
+                */
+               if (!pfn_present(pfn)) {
+                       pfn = round_down(pfn + PAGES_PER_SECTION,
+                                        PAGES_PER_SECTION) - 1;
+                       continue;
+               }
+
+               /*
+                * We need to check if page belongs to nid only at the boot
+                * case because node's ranges can be interleaved.
+                */
+               page_nid = get_nid_for_pfn(pfn);
+               if (page_nid < 0)
+                       continue;
+               if (page_nid != nid)
+                       continue;
+
+               return do_register_memory_block_under_node(nid, mem_blk);
+       }
        /* mem section does not span the specified node */
        return 0;
 }

+/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+                                                void *arg)
+{
+       int nid = *(int *)arg;
+
+       return do_register_memory_block_under_node(nid, mem_blk);
+}
+
 /*
  * Unregister a memory block device under the node it spans. Memory blocks
  * with multiple nodes cannot be offlined and therefore also never be removed.
@@ -829,11 +846,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
                          kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 }

-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+                     enum meminit_context context)
 {
+       walk_memory_blocks_func_t func;
+
+       if (context == MEMINIT_HOTPLUG)
+               func = register_mem_block_under_node_hotplug;
+       else
+               func = register_mem_block_under_node_early;
+
        return walk_memory_blocks(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
-                                 register_mem_sect_under_node);
+                                 func);
 }

 #ifdef CONFIG_HUGETLBFS

@@ -927,7 +927,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = {
        GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0),
        GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
        GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
-       GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+       GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
             CLK_IGNORE_UNUSED, 0),
        GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0,
@@ -969,7 +969,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
             0),
        GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
        GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
-       GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
+       GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
             CLK_IGNORE_UNUSED, 0),
        GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0,

@@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
        { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
          0, 0, 2, 0xB0, 1},
        { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
-         ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
+         ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2},
        { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
          ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
        { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,

@@ -1569,9 +1569,6 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        unsigned long flags = 0;
        unsigned long input_rate;

-       if (clk_pll_is_enabled(hw))
-               return 0;
-
        input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));

        if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))

@@ -28,6 +28,7 @@ static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev)
        void __iomem *base = timer_of_base(to_timer_of(ce));

        writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS);
+       writel_relaxed(0, base + TIMER_INI);

        ce->event_handler(ce);

@@ -1115,8 +1115,8 @@ static const struct aspeed_gpio_config ast2500_config =
 static const struct aspeed_bank_props ast2600_bank_props[] = {
        /*     input      output   */
-       {5, 0xffffffff, 0x0000ffff}, /* U/V/W/X */
-       {6, 0xffff0000, 0x0fff0000}, /* Y/Z */
+       {5, 0xffffffff, 0xffffff00}, /* U/V/W/X */
+       {6, 0x0000ffff, 0x0000ffff}, /* Y/Z */
        { },
 };

@@ -497,6 +497,7 @@ static int __init gpio_mockup_init(void)
        err = platform_driver_register(&gpio_mockup_driver);
        if (err) {
                gpio_mockup_err("error registering platform driver\n");
+               debugfs_remove_recursive(gpio_mockup_dbg_dir);
                return err;
        }
@@ -527,6 +528,7 @@ static int __init gpio_mockup_init(void)
                        gpio_mockup_err("error registering device");
                        platform_driver_unregister(&gpio_mockup_driver);
                        gpio_mockup_unregister_pdevs();
+                       debugfs_remove_recursive(gpio_mockup_dbg_dir);
                        return PTR_ERR(pdev);
                }

@@ -245,6 +245,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
        girq->chip = &ddata->ichip;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_level_irq;
+       girq->threaded = true;

        ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL);
        if (ret)

@@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data,
                sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
                sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
                sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1);
+               sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
                irq_set_handler_locked(data, handle_edge_irq);
                break;
        case IRQ_TYPE_EDGE_FALLING:
                sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
                sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0);
                sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0);
+               sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
                irq_set_handler_locked(data, handle_edge_irq);
                break;
        case IRQ_TYPE_EDGE_BOTH:
                sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0);
                sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1);
+               sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1);
                irq_set_handler_locked(data, handle_edge_irq);
                break;
        case IRQ_TYPE_LEVEL_HIGH:

@@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
                                continue;

                        tc3589x_gpio->oldregs[i][j] = new;
-                       tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new);
+                       tc3589x_reg_write(tc3589x, regmap[i] + j, new);
                }
        }

@@ -17,7 +17,17 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>

-#define MAX_NR_SGPIO                   80
+/*
+ * MAX_NR_HW_GPIO represents the number of actual hardware-supported GPIOs (ie,
+ * slots within the clocked serial GPIO data). Since each HW GPIO is both an
+ * input and an output, we provide MAX_NR_HW_GPIO * 2 lines on our gpiochip
+ * device.
+ *
+ * We use SGPIO_OUTPUT_OFFSET to define the split between the inputs and
+ * outputs; the inputs start at line 0, the outputs start at OUTPUT_OFFSET.
+ */
+#define MAX_NR_HW_SGPIO                        80
+#define SGPIO_OUTPUT_OFFSET            MAX_NR_HW_SGPIO

 #define ASPEED_SGPIO_CTRL              0x54
@@ -30,8 +40,8 @@ struct aspeed_sgpio {
        struct clk *pclk;
        spinlock_t lock;
        void __iomem *base;
-       uint32_t dir_in[3];
        int irq;
+       int n_sgpio;
 };

 struct aspeed_sgpio_bank {
@@ -111,31 +121,69 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
        }
 }

-#define GPIO_BANK(x)    ((x) >> 5)
-#define GPIO_OFFSET(x)  ((x) & 0x1f)
+#define GPIO_BANK(x)    ((x % SGPIO_OUTPUT_OFFSET) >> 5)
+#define GPIO_OFFSET(x)  ((x % SGPIO_OUTPUT_OFFSET) & 0x1f)
 #define GPIO_BIT(x)     BIT(GPIO_OFFSET(x))

 static const struct aspeed_sgpio_bank *to_bank(unsigned int offset)
 {
-       unsigned int bank = GPIO_BANK(offset);
+       unsigned int bank;

+       bank = GPIO_BANK(offset);
        WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks));
        return &aspeed_sgpio_banks[bank];
 }

+static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc,
+               unsigned long *valid_mask, unsigned int ngpios)
+{
+       struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
+       int n = sgpio->n_sgpio;
+       int c = SGPIO_OUTPUT_OFFSET - n;
+
+       WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
+
+       /* input GPIOs in the lower range */
+       bitmap_set(valid_mask, 0, n);
+       bitmap_clear(valid_mask, n, c);
+
+       /* output GPIOS above SGPIO_OUTPUT_OFFSET */
+       bitmap_set(valid_mask, SGPIO_OUTPUT_OFFSET, n);
+       bitmap_clear(valid_mask, SGPIO_OUTPUT_OFFSET + n, c);
+
+       return 0;
+}
+
+static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc,
+               unsigned long *valid_mask, unsigned int ngpios)
+{
+       struct aspeed_sgpio *sgpio = gpiochip_get_data(gc);
+       int n = sgpio->n_sgpio;
+
+       WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2);
+
+       /* input GPIOs in the lower range */
+       bitmap_set(valid_mask, 0, n);
+       bitmap_clear(valid_mask, n, ngpios - n);
+}
+
+static bool aspeed_sgpio_is_input(unsigned int offset)
+{
+       return offset < SGPIO_OUTPUT_OFFSET;
+}
+
 static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
 {
        struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
        const struct aspeed_sgpio_bank *bank = to_bank(offset);
        unsigned long flags;
        enum aspeed_sgpio_reg reg;
-       bool is_input;
        int rc = 0;

        spin_lock_irqsave(&gpio->lock, flags);

-       is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
-       reg = is_input ? reg_val : reg_rdata;
+       reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
        rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));

        spin_unlock_irqrestore(&gpio->lock, flags);
@@ -143,22 +191,31 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
        return rc;
 }

-static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
+static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
 {
        struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
        const struct aspeed_sgpio_bank *bank = to_bank(offset);
-       void __iomem *addr;
+       void __iomem *addr_r, *addr_w;
        u32 reg = 0;

-       addr = bank_reg(gpio, bank, reg_val);
-       reg = ioread32(addr);
+       if (aspeed_sgpio_is_input(offset))
+               return -EINVAL;
+
+       /* Since this is an output, read the cached value from rdata, then
+        * update val. */
+       addr_r = bank_reg(gpio, bank, reg_rdata);
+       addr_w = bank_reg(gpio, bank, reg_val);
+
+       reg = ioread32(addr_r);

        if (val)
                reg |= GPIO_BIT(offset);
        else
                reg &= ~GPIO_BIT(offset);

-       iowrite32(reg, addr);
+       iowrite32(reg, addr_w);
+
+       return 0;
 }

 static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
@@ -175,43 +232,28 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
 static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
 {
-       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpio->lock, flags);
-       gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset);
-       spin_unlock_irqrestore(&gpio->lock, flags);
-
-       return 0;
+       return aspeed_sgpio_is_input(offset) ? 0 : -EINVAL;
 }

 static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
 {
        struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
        unsigned long flags;
+       int rc;

+       /* No special action is required for setting the direction; we'll
+        * error-out in sgpio_set_value if this isn't an output GPIO */
        spin_lock_irqsave(&gpio->lock, flags);
-       gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset);
-       sgpio_set_value(gc, offset, val);
+       rc = sgpio_set_value(gc, offset, val);
        spin_unlock_irqrestore(&gpio->lock, flags);

-       return 0;
+       return rc;
 }

 static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
 {
-       int dir_status;
-       struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpio->lock, flags);
-       dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
-       spin_unlock_irqrestore(&gpio->lock, flags);
-
-       return dir_status;
+       return !!aspeed_sgpio_is_input(offset);
 }

 static void irqd_to_aspeed_sgpio_data(struct irq_data *d,
@@ -402,6 +444,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
        irq = &gpio->chip.irq;
        irq->chip = &aspeed_sgpio_irqchip;
+       irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask;
        irq->handler = handle_bad_irq;
        irq->default_type = IRQ_TYPE_NONE;
        irq->parent_handler = aspeed_sgpio_irq_handler;
@@ -409,17 +452,15 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
        irq->parents = &gpio->irq;
        irq->num_parents = 1;

-       /* set IRQ settings and Enable Interrupt */
+       /* Apply default IRQ settings */
        for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
                bank = &aspeed_sgpio_banks[i];
                /* set falling or level-low irq */
                iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
                /* trigger type is edge */
                iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
-               /* dual edge trigger mode. */
-               iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2));
-               /* enable irq */
-               iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable));
+               /* single edge trigger */
+               iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2));
        }

        return 0;
@@ -452,11 +493,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
        if (rc < 0) {
                dev_err(&pdev->dev, "Could not read ngpios property\n");
                return -EINVAL;
-       } else if (nr_gpios > MAX_NR_SGPIO) {
+       } else if (nr_gpios > MAX_NR_HW_SGPIO) {
                dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n",
-                       MAX_NR_SGPIO, nr_gpios);
+                       MAX_NR_HW_SGPIO, nr_gpios);
                return -EINVAL;
        }
+       gpio->n_sgpio = nr_gpios;

        rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq);
        if (rc < 0) {
@@ -497,7 +539,8 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
        spin_lock_init(&gpio->lock);

        gpio->chip.parent = &pdev->dev;
-       gpio->chip.ngpio = nr_gpios;
+       gpio->chip.ngpio = MAX_NR_HW_SGPIO * 2;
+       gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask;
        gpio->chip.direction_input = aspeed_sgpio_dir_in;
        gpio->chip.direction_output = aspeed_sgpio_dir_out;
        gpio->chip.get_direction = aspeed_sgpio_get_direction;
@@ -509,9 +552,6 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
        gpio->chip.label = dev_name(&pdev->dev);
        gpio->chip.base = -1;

-       /* set all SGPIO pins as input (1). */
-       memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in));
-
        aspeed_sgpio_setup_irqs(gpio, pdev);

        rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
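
Illustration only, not part of the patch: a small C sketch of the line-numbering scheme the reworked driver exposes. Inputs occupy lines 0..n-1, outputs start at SGPIO_OUTPUT_OFFSET (80), and the modulo in GPIO_BANK()/GPIO_OFFSET() maps both halves onto the same hardware bank and bit.

#include <stdio.h>

#define MAX_NR_HW_SGPIO         80
#define SGPIO_OUTPUT_OFFSET     MAX_NR_HW_SGPIO
#define GPIO_BANK(x)    (((x) % SGPIO_OUTPUT_OFFSET) >> 5)
#define GPIO_OFFSET(x)  (((x) % SGPIO_OUTPUT_OFFSET) & 0x1f)

int main(void)
{
        unsigned int in = 42;                           /* input line 42 */
        unsigned int out = SGPIO_OUTPUT_OFFSET + 42;    /* its output twin */

        /* both lines resolve to bank 1, bit 10 */
        printf("input  line %u -> bank %u bit %u\n",
               in, GPIO_BANK(in), GPIO_OFFSET(in));
        printf("output line %u -> bank %u bit %u\n",
               out, GPIO_BANK(out), GPIO_OFFSET(out));
        return 0;
}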

@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
           take the current one */
        if (active && !adev->have_disp_power_ref) {
                adev->have_disp_power_ref = true;
-               goto out;
+               return ret;
        }
        /* if we have no active crtcs, then drop the power ref
           we got before */

@@ -396,7 +396,7 @@ static struct regmap_config sun8i_mixer_regmap_config = {
        .reg_bits       = 32,
        .val_bits       = 32,
        .reg_stride     = 4,
-       .max_register   = 0xbfffc, /* guessed */
+       .max_register   = 0xffffc, /* guessed */
 };

 static int sun8i_mixer_of_get_id(struct device_node *node)

@@ -65,6 +65,9 @@ struct i2c_ram {
        char    res1[4];        /* Reserved */
        ushort  rpbase;         /* Relocation pointer */
        char    res2[2];        /* Reserved */
+       /* The following elements are only for CPM2 */
+       char    res3[4];        /* Reserved */
+       uint    sdmatmp;        /* Internal */
 };

 #define I2COM_START    0x80

@@ -786,7 +786,7 @@ static int adc5_probe(struct platform_device *pdev)
 static struct platform_driver adc5_driver = {
        .driver = {
-               .name = "qcom-spmi-adc5.c",
+               .name = "qcom-spmi-adc5",
                .of_match_table = adc5_match_table,
        },
        .probe = adc5_probe,

@@ -282,6 +282,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse,
        case TP_VARIANT_ALPS:
        case TP_VARIANT_ELAN:
        case TP_VARIANT_NXP:
+       case TP_VARIANT_JYT_SYNAPTICS:
+       case TP_VARIANT_SYNAPTICS:
                if (variant_id)
                        *variant_id = param[0];
                if (firmware_id)

@@ -721,6 +721,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
                },
        },
+       {
+               /* Acer Aspire 5 A515 */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
+                       DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+               },
+       },
        { }
 };

@@ -1299,13 +1299,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
                return -ENODEV;

        data = platform_get_drvdata(sysmmu);
-       if (!data)
+       if (!data) {
+               put_device(&sysmmu->dev);
                return -ENODEV;
+       }

        if (!owner) {
                owner = kzalloc(sizeof(*owner), GFP_KERNEL);
-               if (!owner)
+               if (!owner) {
+                       put_device(&sysmmu->dev);
                        return -ENOMEM;
+               }

                INIT_LIST_HEAD(&owner->controllers);
                mutex_init(&owner->rpm_lock);

@@ -441,6 +441,9 @@ static void memstick_check(struct work_struct *work)
        } else if (host->card->stop)
                host->card->stop(host->card);

+       if (host->removing)
+               goto out_power_off;
+
        card = memstick_alloc_card(host);

        if (!card) {
@@ -545,6 +548,7 @@ EXPORT_SYMBOL(memstick_add_host);
  */
 void memstick_remove_host(struct memstick_host *host)
 {
+       host->removing = 1;
        flush_workqueue(workqueue);

        mutex_lock(&host->lock);
        if (host->card)

@@ -798,7 +798,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
 {
        return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
-              dmi_match(DMI_BIOS_VENDOR, "LENOVO");
+              (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
+               dmi_match(DMI_SYS_VENDOR, "IRBIS"));
 }

 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)

@@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
 #define DSL                    CONFIG_DE2104X_DSL
 #endif

-#define DE_RX_RING_SIZE                64
+#define DE_RX_RING_SIZE                128
 #define DE_TX_RING_SIZE                64
 #define DE_RING_BYTES          \
                ((sizeof(struct de_desc) * DE_RX_RING_SIZE) +   \

@@ -201,7 +201,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
                        dev_dbg(&info->control->dev,
                                "rndis response error, code %d\n", retval);
                }
-               msleep(20);
+               msleep(40);
        }
        dev_dbg(&info->control->dev, "rndis response timeout\n");
        return -ETIMEDOUT;

@@ -118,6 +118,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
        skb_put(skb, sizeof(struct cisco_packet));
        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
+       skb->protocol = htons(ETH_P_HDLC);
        skb_reset_network_header(skb);

        dev_queue_xmit(skb);

@@ -433,6 +433,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
                        if (pvc->state.fecn) /* TX Congestion counter */
                                dev->stats.tx_compressed++;
                        skb->dev = pvc->frad;
+                       skb->protocol = htons(ETH_P_HDLC);
+                       skb_reset_network_header(skb);
                        dev_queue_xmit(skb);
                        return NETDEV_TX_OK;
                }
@@ -555,6 +557,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
        skb_put(skb, i);
        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
+       skb->protocol = htons(ETH_P_HDLC);
        skb_reset_network_header(skb);

        dev_queue_xmit(skb);
@@ -1041,7 +1044,7 @@ static void pvc_setup(struct net_device *dev)
 {
        dev->type = ARPHRD_DLCI;
        dev->flags = IFF_POINTOPOINT;
-       dev->hard_header_len = 10;
+       dev->hard_header_len = 0;
        dev->addr_len = 2;
        netif_keep_dst(dev);
 }
@@ -1093,6 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        dev->mtu = HDLC_MAX_MTU;
        dev->min_mtu = 68;
        dev->max_mtu = HDLC_MAX_MTU;
+       dev->needed_headroom = 10;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->ml_priv = pvc;

@@ -251,6 +251,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,

        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
+       skb->protocol = htons(ETH_P_HDLC);
        skb_reset_network_header(skb);
        skb_queue_tail(&tx_queue, skb);
 }

@@ -198,8 +198,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
        struct net_device *dev;
        int size = skb->len;

-       skb->protocol = htons(ETH_P_X25);
-
        ptr = skb_push(skb, 2);

        *ptr++ = size % 256;
@@ -210,6 +208,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)

        skb->dev = dev = lapbeth->ethdev;

+       skb->protocol = htons(ETH_P_DEC);
+
        skb_reset_network_header(skb);

        dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);

@@ -630,7 +630,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
        }

        __rq_for_each_bio(bio, req) {
-               u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+               u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
                u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

                if (n < segments) {
@@ -671,7 +671,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
        cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
        cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
        cmnd->write_zeroes.slba =
-               cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+               cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->write_zeroes.length =
                cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
        cmnd->write_zeroes.control = 0;
@@ -695,7 +695,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
        cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
        cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
-       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+       cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

        if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
@@ -1680,12 +1680,6 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
 }
 #endif /* CONFIG_BLK_DEV_INTEGRITY */

-static void nvme_set_chunk_size(struct nvme_ns *ns)
-{
-       u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
-       blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
-}
-
 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
 {
        struct nvme_ctrl *ctrl = ns->ctrl;
@@ -1719,8 +1713,7 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
 {
-       u32 max_sectors;
-       unsigned short bs = 1 << ns->lba_shift;
+       u64 max_blocks;

        if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
            (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
@@ -1736,11 +1729,12 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
         * nvme_init_identify() if available.
         */
        if (ns->ctrl->max_hw_sectors == UINT_MAX)
-               max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
+               max_blocks = (u64)USHRT_MAX + 1;
        else
-               max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
+               max_blocks = ns->ctrl->max_hw_sectors + 1;

-       blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
+       blk_queue_max_write_zeroes_sectors(disk->queue,
+                                          nvme_lba_to_sect(ns, max_blocks));
 }

 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1774,7 +1768,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
 static void nvme_update_disk_info(struct gendisk *disk,
                struct nvme_ns *ns, struct nvme_id_ns *id)
 {
-       sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
+       sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
        unsigned short bs = 1 << ns->lba_shift;
        u32 atomic_bs, phys_bs, io_opt;
@@ -1840,6 +1834,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 {
        struct nvme_ns *ns = disk->private_data;
+       u32 iob;

        /*
         * If identify namespace failed, use default 512 byte block size so
@@ -1848,7 +1843,13 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
        if (ns->lba_shift == 0)
                ns->lba_shift = 9;
-       ns->noiob = le16_to_cpu(id->noiob);
+
+       if ((ns->ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+           is_power_of_2(ns->ctrl->max_hw_sectors))
+               iob = ns->ctrl->max_hw_sectors;
+       else
+               iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+
        ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
        ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
        /* the PI implementation requires metadata equal t10 pi tuple size */
@@ -1857,8 +1858,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        else
                ns->pi_type = 0;

-       if (ns->noiob)
-               nvme_set_chunk_size(ns);
+       if (iob)
+               blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
        nvme_update_disk_info(disk, ns, id);
 #ifdef CONFIG_NVME_MULTIPATH
        if (ns->head->disk) {
@@ -2209,9 +2210,6 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
-           is_power_of_2(ctrl->max_hw_sectors))
-               blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
@@ -2933,10 +2931,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
                return -EWOULDBLOCK;
        }

+       nvme_get_ctrl(ctrl);
+       if (!try_module_get(ctrl->ops->module))
+               return -EINVAL;
+
        file->private_data = ctrl;
        return 0;
 }

+static int nvme_dev_release(struct inode *inode, struct file *file)
+{
+       struct nvme_ctrl *ctrl =
+               container_of(inode->i_cdev, struct nvme_ctrl, cdev);
+
+       module_put(ctrl->ops->module);
+       nvme_put_ctrl(ctrl);
+       return 0;
+}
+
 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
 {
        struct nvme_ns *ns;
@@ -2999,6 +3011,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 static const struct file_operations nvme_dev_fops = {
        .owner          = THIS_MODULE,
        .open           = nvme_dev_open,
+       .release        = nvme_dev_release,
        .unlocked_ioctl = nvme_dev_ioctl,
        .compat_ioctl   = nvme_dev_ioctl,
 };

@@ -3319,12 +3319,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != laddr.nn ||
-                   lport->localport.port_name != laddr.pn)
+                   lport->localport.port_name != laddr.pn ||
+                   lport->localport.port_state != FC_OBJSTATE_ONLINE)
                        continue;

                list_for_each_entry(rport, &lport->endp_list, endp_list) {
                        if (rport->remoteport.node_name != raddr.nn ||
-                           rport->remoteport.port_name != raddr.pn)
+                           rport->remoteport.port_name != raddr.pn ||
+                           rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                                continue;

                        /* if fail to get reference fall through. Will error */

@@ -384,7 +384,6 @@ struct nvme_ns {
 #define NVME_NS_REMOVING       0
 #define NVME_NS_DEAD           1
 #define NVME_NS_ANA_PENDING    2
-       u16 noiob;

        struct nvme_fault_inject fault_inject;

@@ -429,9 +428,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
        return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
 }

-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+/*
+ * Convert a 512B sector number to a device logical block number.
+ */
+static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
 {
-       return (sector >> (ns->lba_shift - 9));
+       return sector >> (ns->lba_shift - SECTOR_SHIFT);
+}
+
+/*
+ * Convert a device logical block number to a 512B sector number.
+ */
+static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
+{
+       return lba << (ns->lba_shift - SECTOR_SHIFT);
 }

 static inline void nvme_end_request(struct request *req, __le16 status,
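
Illustration only, not part of the patch: what the two new helpers above compute, sketched in user-space C. The 4096-byte LBA format (lba_shift = 12) is an assumed example; the conversion is then a shift by (12 - SECTOR_SHIFT) = 3.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static unsigned int lba_shift = 12;     /* assumed: 4K logical blocks */

static uint64_t nvme_sect_to_lba(uint64_t sector)
{
        return sector >> (lba_shift - SECTOR_SHIFT);
}

static uint64_t nvme_lba_to_sect(uint64_t lba)
{
        return lba << (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
        assert(nvme_sect_to_lba(8) == 1);       /* 8 sectors = one 4K block */
        assert(nvme_lba_to_sect(1) == 8);
        printf("ok\n");
        return 0;
}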

@@ -941,13 +941,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
        volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
        struct request *req;

-       if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
-               dev_warn(nvmeq->dev->ctrl.device,
-                       "invalid id %d completed on queue %d\n",
-                       cqe->command_id, le16_to_cpu(cqe->sq_id));
-               return;
-       }
-
        /*
         * AEN requests are special as they don't time out and can
         * survive any kind of queue freeze and often don't respond to
@@ -962,6 +955,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
        }

        req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+       if (unlikely(!req)) {
+               dev_warn(nvmeq->dev->ctrl.device,
+                       "invalid id %d completed on queue %d\n",
+                       cqe->command_id, le16_to_cpu(cqe->sq_id));
+               return;
+       }
+
        trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
        nvme_end_request(req, cqe->status, cqe->result);
 }

@@ -625,8 +625,10 @@ static int serdes_am654_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);

        phy = devm_phy_create(dev, NULL, &ops);
-       if (IS_ERR(phy))
-               return PTR_ERR(phy);
+       if (IS_ERR(phy)) {
+               ret = PTR_ERR(phy);
+               goto clk_err;
+       }

        phy_set_drvdata(phy, am654_phy);
        phy_provider = devm_of_phy_provider_register(dev, serdes_am654_xlate);

@@ -414,7 +414,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0x1, "i2c0", "sck",        V_98DX3236_PLUS)),
        MPP_MODE(15,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,         V_98DX3236_PLUS),
-                MPP_VAR_FUNCTION(0x4, "i2c0", "sda",        V_98DX3236_PLUS)),
+                MPP_VAR_FUNCTION(0x1, "i2c0", "sda",        V_98DX3236_PLUS)),
        MPP_MODE(16,
                 MPP_VAR_FUNCTION(0x0, "gpo", NULL,          V_98DX3236_PLUS),
                 MPP_VAR_FUNCTION(0x4, "dev", "oe",          V_98DX3236_PLUS)),

@@ -555,13 +555,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
 static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
 {
        struct fsl_espi *espi = context_data;
-       u32 events;
+       u32 events, mask;

        spin_lock(&espi->lock);

        /* Get interrupt events(tx/rx) */
        events = fsl_espi_read_reg(espi, ESPI_SPIE);
-       if (!events) {
+       mask = fsl_espi_read_reg(espi, ESPI_SPIM);
+       if (!(events & mask)) {
                spin_unlock(&espi->lock);
                return IRQ_NONE;
        }

@@ -1189,7 +1189,6 @@ static int ncm_unwrap_ntb(struct gether *port,
        const struct ndp_parser_opts *opts = ncm->parser_opts;
        unsigned        crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
        int             dgram_counter;
-       bool            ndp_after_header;

        /* dwSignature */
        if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1216,7 +1215,6 @@ static int ncm_unwrap_ntb(struct gether *port,
        }

        ndp_index = get_ncm(&tmp, opts->ndp_index);
-       ndp_after_header = false;

        /* Run through all the NDP's in the NTB */
        do {
@@ -1232,8 +1230,6 @@ static int ncm_unwrap_ntb(struct gether *port,
                                ndp_index);
                        goto err;
                }
-               if (ndp_index == opts->nth_size)
-                       ndp_after_header = true;

                /*
                 * walk through NDP
@@ -1312,37 +1308,13 @@ static int ncm_unwrap_ntb(struct gether *port,
                        index2 = get_ncm(&tmp, opts->dgram_item_len);
                        dg_len2 = get_ncm(&tmp, opts->dgram_item_len);

-                       if (index2 == 0 || dg_len2 == 0)
-                               break;
-
                        /* wDatagramIndex[1] */
-                       if (ndp_after_header) {
-                               if (index2 < opts->nth_size + opts->ndp_size) {
-                                       INFO(port->func.config->cdev,
-                                            "Bad index: %#X\n", index2);
-                                       goto err;
-                               }
-                       } else {
-                               if (index2 < opts->nth_size + opts->dpe_size) {
-                                       INFO(port->func.config->cdev,
-                                            "Bad index: %#X\n", index2);
-                                       goto err;
-                               }
-                       }
                        if (index2 > block_len - opts->dpe_size) {
                                INFO(port->func.config->cdev,
                                     "Bad index: %#X\n", index2);
                                goto err;
                        }

-                       /* wDatagramLength[1] */
-                       if ((dg_len2 < 14 + crc_len) ||
-                           (dg_len2 > frame_max)) {
-                               INFO(port->func.config->cdev,
-                                    "Bad dgram length: %#X\n", dg_len);
-                               goto err;
-                       }
-
                        /*
                         * Copy the data into a new skb.
                         * This ensures the truesize is correct
@@ -1359,6 +1331,8 @@ static int ncm_unwrap_ntb(struct gether *port,
                        ndp_len -= 2 * (opts->dgram_item_len * 2);

                        dgram_counter++;
+                       if (index2 == 0 || dg_len2 == 0)
+                               break;
                } while (ndp_len > 2 * (opts->dgram_item_len * 2));
        } while (ndp_index);

@@ -384,6 +384,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
        return val < vq->num;
 }

+static struct virtio_transport vhost_transport = {
+       .transport = {
+               .get_local_cid            = vhost_transport_get_local_cid,
+
+               .init                     = virtio_transport_do_socket_init,
+               .destruct                 = virtio_transport_destruct,
+               .release                  = virtio_transport_release,
+               .connect                  = virtio_transport_connect,
+               .shutdown                 = virtio_transport_shutdown,
+               .cancel_pkt               = vhost_transport_cancel_pkt,
+
+               .dgram_enqueue            = virtio_transport_dgram_enqueue,
+               .dgram_dequeue            = virtio_transport_dgram_dequeue,
+               .dgram_bind               = virtio_transport_dgram_bind,
+               .dgram_allow              = virtio_transport_dgram_allow,
+
+               .stream_enqueue           = virtio_transport_stream_enqueue,
+               .stream_dequeue           = virtio_transport_stream_dequeue,
+               .stream_has_data          = virtio_transport_stream_has_data,
+               .stream_has_space         = virtio_transport_stream_has_space,
+               .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
+               .stream_is_active         = virtio_transport_stream_is_active,
+               .stream_allow             = virtio_transport_stream_allow,
+
+               .notify_poll_in           = virtio_transport_notify_poll_in,
+               .notify_poll_out          = virtio_transport_notify_poll_out,
+               .notify_recv_init         = virtio_transport_notify_recv_init,
+               .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
+               .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
+               .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+               .notify_send_init         = virtio_transport_notify_send_init,
+               .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
+               .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
+               .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+               .set_buffer_size          = virtio_transport_set_buffer_size,
+               .set_min_buffer_size      = virtio_transport_set_min_buffer_size,
+               .set_max_buffer_size      = virtio_transport_set_max_buffer_size,
+               .get_buffer_size          = virtio_transport_get_buffer_size,
+               .get_min_buffer_size      = virtio_transport_get_min_buffer_size,
+               .get_max_buffer_size      = virtio_transport_get_max_buffer_size,
+       },
+
+       .send_pkt = vhost_transport_send_pkt,
+};
+
 static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 {
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -440,7 +486,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
                    le64_to_cpu(pkt->hdr.dst_cid) ==
                    vhost_transport_get_local_cid())
-                       virtio_transport_recv_pkt(pkt);
+                       virtio_transport_recv_pkt(&vhost_transport, pkt);
                else
                        virtio_transport_free_pkt(pkt);
@@ -793,52 +839,6 @@ static struct miscdevice vhost_vsock_misc = {
        .fops = &vhost_vsock_fops,
 };

-static struct virtio_transport vhost_transport = {
-       .transport = {
-               .get_local_cid            = vhost_transport_get_local_cid,
-
-               .init                     = virtio_transport_do_socket_init,
-               .destruct                 = virtio_transport_destruct,
-               .release                  = virtio_transport_release,
-               .connect                  = virtio_transport_connect,
-               .shutdown                 = virtio_transport_shutdown,
-               .cancel_pkt               = vhost_transport_cancel_pkt,
-
-               .dgram_enqueue            = virtio_transport_dgram_enqueue,
-               .dgram_dequeue            = virtio_transport_dgram_dequeue,
-               .dgram_bind               = virtio_transport_dgram_bind,
-               .dgram_allow              = virtio_transport_dgram_allow,
-
-               .stream_enqueue           = virtio_transport_stream_enqueue,
-               .stream_dequeue           = virtio_transport_stream_dequeue,
-               .stream_has_data          = virtio_transport_stream_has_data,
-               .stream_has_space         = virtio_transport_stream_has_space,
-               .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
-               .stream_is_active         = virtio_transport_stream_is_active,
-               .stream_allow             = virtio_transport_stream_allow,
-
-               .notify_poll_in           = virtio_transport_notify_poll_in,
-               .notify_poll_out          = virtio_transport_notify_poll_out,
-               .notify_recv_init         = virtio_transport_notify_recv_init,
-               .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
-               .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
-               .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
-               .notify_send_init         = virtio_transport_notify_send_init,
-               .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
-               .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
-               .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
-
-               .set_buffer_size          = virtio_transport_set_buffer_size,
-               .set_min_buffer_size      = virtio_transport_set_min_buffer_size,
-               .set_max_buffer_size      = virtio_transport_set_max_buffer_size,
-               .get_buffer_size          = virtio_transport_get_buffer_size,
-               .get_min_buffer_size      = virtio_transport_get_min_buffer_size,
-               .get_max_buffer_size      = virtio_transport_get_max_buffer_size,
-       },
-
-       .send_pkt = vhost_transport_send_pkt,
-};
-
 static int __init vhost_vsock_init(void)
 {
        int ret;

View File

@@ -562,6 +562,37 @@ static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
 	wake_up(&fs_info->dev_replace.replace_wait);
 }
+/*
+ * When finishing the device replace, before swapping the source device with the
+ * target device we must update the chunk allocation state in the target device,
+ * as it is empty because replace works by directly copying the chunks and not
+ * through the normal chunk allocation path.
+ */
+static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
+					struct btrfs_device *tgtdev)
+{
+	struct extent_state *cached_state = NULL;
+	u64 start = 0;
+	u64 found_start;
+	u64 found_end;
+	int ret = 0;
+	lockdep_assert_held(&srcdev->fs_info->chunk_mutex);
+	while (!find_first_extent_bit(&srcdev->alloc_state, start,
+				      &found_start, &found_end,
+				      CHUNK_ALLOCATED, &cached_state)) {
+		ret = set_extent_bits(&tgtdev->alloc_state, found_start,
+				      found_end, CHUNK_ALLOCATED);
+		if (ret)
+			break;
+		start = found_end + 1;
+	}
+	free_extent_state(cached_state);
+	return ret;
+}
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 				       int scrub_ret)
 {
@@ -636,8 +667,14 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	dev_replace->time_stopped = ktime_get_real_seconds();
 	dev_replace->item_needs_writeback = 1;
-	/* replace old device with new one in mapping tree */
+	/*
+	 * Update allocation state in the new device and replace the old device
+	 * with the new one in the mapping tree.
+	 */
 	if (!scrub_ret) {
+		scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device);
+		if (scrub_ret)
+			goto error;
 		btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
 								src_device,
 								tgt_device);
@@ -648,6 +685,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 			  btrfs_dev_name(src_device),
 			  src_device->devid,
 			  rcu_str_deref(tgt_device->name), scrub_ret);
+error:
 	up_write(&dev_replace->rwsem);
 	mutex_unlock(&fs_info->chunk_mutex);
 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
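Editor's note: the new helper leans on the extent-io-tree iteration contract: find_first_extent_bit() returns 0 and fills found_start/found_end as long as some range at or after `start` carries the queried bit, so advancing `start` past each hit visits every CHUNK_ALLOCATED range exactly once. A condensed sketch of that walk, with `src` and `tgt` standing in for the two devices above:

    u64 start = 0, found_start, found_end;
    struct extent_state *cached = NULL;

    while (!find_first_extent_bit(&src->alloc_state, start,
                                  &found_start, &found_end,
                                  CHUNK_ALLOCATED, &cached)) {
            /* mirror [found_start, found_end] into the empty target */
            if (set_extent_bits(&tgt->alloc_state, found_start,
                                found_end, CHUNK_ALLOCATED))
                    break;
            start = found_end + 1;      /* resume after this range */
    }
    free_extent_state(cached);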

View File

@@ -218,8 +218,7 @@ struct eventpoll {
 	struct file *file;
 	/* used to optimize loop detection check */
-	int visited;
-	struct list_head visited_list_link;
+	u64 gen;
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	/* used to track busy poll napi_id */
@@ -269,6 +268,8 @@ static long max_user_watches __read_mostly;
  */
 static DEFINE_MUTEX(epmutex);
+static u64 loop_check_gen = 0;
 /* Used to check for epoll file descriptor inclusion loops */
 static struct nested_calls poll_loop_ncalls;
@@ -278,9 +279,6 @@ static struct kmem_cache *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
 static struct kmem_cache *pwq_cache __read_mostly;
-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
-static LIST_HEAD(visited_list);
 /*
  * List of files with newly added links, where we may need to limit the number
  * of emanating paths. Protected by the epmutex.
@@ -1455,7 +1453,7 @@ static int reverse_path_check(void)
 static int ep_create_wakeup_source(struct epitem *epi)
 {
-	const char *name;
+	struct name_snapshot n;
 	struct wakeup_source *ws;
 	if (!epi->ep->ws) {
@@ -1464,8 +1462,9 @@ static int ep_create_wakeup_source(struct epitem *epi)
 		return -ENOMEM;
 	}
-	name = epi->ffd.file->f_path.dentry->d_name.name;
-	ws = wakeup_source_register(NULL, name);
+	take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
+	ws = wakeup_source_register(NULL, n.name.name);
+	release_dentry_name_snapshot(&n);
 	if (!ws)
 		return -ENOMEM;
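Editor's note: the snapshot closes a use-after-free window: d_name.name can be freed or replaced by a concurrent rename while wakeup_source_register() is still copying it. A minimal sketch of the pattern, assuming `dentry` is any dentry the caller holds a reference to:

    struct name_snapshot n;
    struct wakeup_source *ws;

    /* pin a stable copy of the name, safe against concurrent rename */
    take_dentry_name_snapshot(&n, dentry);
    ws = wakeup_source_register(NULL, n.name.name);
    release_dentry_name_snapshot(&n);  /* drop the copy once duplicated */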
@@ -1527,6 +1526,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 		RCU_INIT_POINTER(epi->ws, NULL);
 	}
+	/* Add the current item to the list of active epoll hook for this file */
+	spin_lock(&tfile->f_lock);
+	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+	spin_unlock(&tfile->f_lock);
+
+	/*
+	 * Add the current item to the RB tree. All RB tree operations are
+	 * protected by "mtx", and ep_insert() is called with "mtx" held.
+	 */
+	ep_rbtree_insert(ep, epi);
+
+	/* now check if we've created too many backpaths */
+	error = -EINVAL;
+	if (full_check && reverse_path_check())
+		goto error_remove_epi;
+
 	/* Initialize the poll table using the queue callback */
 	epq.epi = epi;
 	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1549,22 +1564,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	if (epi->nwait < 0)
 		goto error_unregister;
-	/* Add the current item to the list of active epoll hook for this file */
-	spin_lock(&tfile->f_lock);
-	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
-	spin_unlock(&tfile->f_lock);
-
-	/*
-	 * Add the current item to the RB tree. All RB tree operations are
-	 * protected by "mtx", and ep_insert() is called with "mtx" held.
-	 */
-	ep_rbtree_insert(ep, epi);
-
-	/* now check if we've created too many backpaths */
-	error = -EINVAL;
-	if (full_check && reverse_path_check())
-		goto error_remove_epi;
-
 	/* We have to drop the new item inside our item list to keep track of it */
 	write_lock_irq(&ep->lock);
@@ -1593,6 +1592,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 	return 0;
+error_unregister:
+	ep_unregister_pollwait(ep, epi);
 error_remove_epi:
 	spin_lock(&tfile->f_lock);
 	list_del_rcu(&epi->fllink);
@@ -1600,9 +1601,6 @@ error_remove_epi:
 	rb_erase_cached(&epi->rbn, &ep->rbr);
-error_unregister:
-	ep_unregister_pollwait(ep, epi);
-
 	/*
 	 * We need to do this because an event could have been arrived on some
 	 * allocated wait queue. Note that we don't care about the ep->ovflist
@@ -1969,13 +1967,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
 	struct epitem *epi;
 	mutex_lock_nested(&ep->mtx, call_nests + 1);
-	ep->visited = 1;
-	list_add(&ep->visited_list_link, &visited_list);
+	ep->gen = loop_check_gen;
 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		if (unlikely(is_file_epoll(epi->ffd.file))) {
 			ep_tovisit = epi->ffd.file->private_data;
-			if (ep_tovisit->visited)
+			if (ep_tovisit->gen == loop_check_gen)
 				continue;
 			error = ep_call_nested(&poll_loop_ncalls,
 					ep_loop_check_proc, epi->ffd.file,
@@ -2016,18 +2013,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
  */
 static int ep_loop_check(struct eventpoll *ep, struct file *file)
 {
-	int ret;
-	struct eventpoll *ep_cur, *ep_next;
-	ret = ep_call_nested(&poll_loop_ncalls,
+	return ep_call_nested(&poll_loop_ncalls,
 			      ep_loop_check_proc, file, ep, current);
-	/* clear visited list */
-	list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
-				 visited_list_link) {
-		ep_cur->visited = 0;
-		list_del(&ep_cur->visited_list_link);
-	}
-	return ret;
 }
 
 static void clear_tfile_check_list(void)
@@ -2189,6 +2176,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	mutex_lock_nested(&ep->mtx, 0);
 	if (op == EPOLL_CTL_ADD) {
 		if (!list_empty(&f.file->f_ep_links) ||
+		    ep->gen == loop_check_gen ||
 		    is_file_epoll(tf.file)) {
 			full_check = 1;
 			mutex_unlock(&ep->mtx);
@@ -2249,6 +2237,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 error_tgt_fput:
 	if (full_check) {
 		clear_tfile_check_list();
+		loop_check_gen++;
 		mutex_unlock(&epmutex);
 	}
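Editor's note: the visited-list removal swaps a mark-and-sweep scheme (set `ep->visited`, keep a global list so every mark can be cleared afterwards) for a generation counter: bumping `loop_check_gen` once at the end of a check invalidates every mark at O(1) cost, with no cleanup walk and no global list to corrupt. A sketch of the idiom outside of epoll, with invented names:

    static u64 check_gen;          /* bumped once per traversal */

    struct node {
            u64 gen;               /* last traversal that saw this node */
            /* ... payload ... */
    };

    static int visit(struct node *n)
    {
            if (n->gen == check_gen)
                    return 0;      /* already seen in this pass */
            n->gen = check_gen;    /* mark without any list bookkeeping */
            return 1;
    }

    /* after a full traversal: every node becomes "unvisited" again */
    /* check_gen++; */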

View File

@@ -3074,11 +3074,10 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	ssize_t ret = 0;
 	struct file *file = iocb->ki_filp;
 	struct fuse_file *ff = file->private_data;
-	bool async_dio = ff->fc->async_dio;
 	loff_t pos = 0;
 	struct inode *inode;
 	loff_t i_size;
-	size_t count = iov_iter_count(iter);
+	size_t count = iov_iter_count(iter), shortened = 0;
 	loff_t offset = iocb->ki_pos;
 	struct fuse_io_priv *io;
@@ -3086,17 +3085,9 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	inode = file->f_mapping->host;
 	i_size = i_size_read(inode);
-	if ((iov_iter_rw(iter) == READ) && (offset > i_size))
+	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
 		return 0;
-	/* optimization for short read */
-	if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
-		if (offset >= i_size)
-			return 0;
-		iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
-		count = iov_iter_count(iter);
-	}
-
 	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
 	if (!io)
 		return -ENOMEM;
@@ -3112,15 +3103,22 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	 * By default, we want to optimize all I/Os with async request
 	 * submission to the client filesystem if supported.
 	 */
-	io->async = async_dio;
+	io->async = ff->fc->async_dio;
 	io->iocb = iocb;
 	io->blocking = is_sync_kiocb(iocb);
+	/* optimization for short read */
+	if (io->async && !io->write && offset + count > i_size) {
+		iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
+		shortened = count - iov_iter_count(iter);
+		count -= shortened;
+	}
+
 	/*
 	 * We cannot asynchronously extend the size of a file.
 	 * In such case the aio will behave exactly like sync io.
 	 */
-	if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
+	if ((offset + count > i_size) && io->write)
 		io->blocking = true;
 	if (io->async && io->blocking) {
@@ -3138,6 +3136,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	} else {
 		ret = __fuse_direct_read(io, iter, &pos);
 	}
+	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
 	if (io->async) {
 		bool blocking = io->blocking;
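Editor's note: the reworked short-read path truncates the iterator to EOF before issuing the read and re-expands it afterwards by the amount withheld, so the caller's view of the remaining buffer is restored instead of silently shrinking. A sketch of the pairing, with `pos`, `i_size` and `do_read()` as stand-ins for illustration:

    size_t count = iov_iter_count(iter), shortened = 0;
    ssize_t ret;

    if (pos + count > i_size) {
            /* don't ask the server for bytes past EOF */
            iov_iter_truncate(iter, i_size - pos);
            shortened = count - iov_iter_count(iter);
    }
    ret = do_read(iter);
    /* hand the withheld tail back to the caller's iterator */
    iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);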

View File

@@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
 	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
 	do {
+		if (entry->label)
+			entry->label->len = NFS4_MAXLABELLEN;
+
 		status = xdr_decode(desc, entry, &stream);
 		if (status != 0) {
 			if (status == -EAGAIN)

View File

@@ -1002,9 +1002,15 @@ xfs_file_iomap_begin(
 		 * I/O, which must be block aligned, we need to report the
 		 * newly allocated address. If the data fork has a hole, copy
 		 * the COW fork mapping to avoid allocating to the data fork.
+		 *
+		 * Otherwise, ensure that the imap range does not extend past
+		 * the range allocated/found in cmap.
 		 */
 		if (directio || imap.br_startblock == HOLESTARTBLOCK)
 			imap = cmap;
+		else
+			xfs_trim_extent(&imap, cmap.br_startoff,
+					cmap.br_blockcount);
 		end_fsb = imap.br_startoff + imap.br_blockcount;
 		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
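Editor's note: xfs_trim_extent() clamps a mapping to a window, so trimming imap to cmap's range guarantees the returned data-fork mapping never extends past what the COW fork actually covers. A rough equivalent of the clamping arithmetic, using an invented extent type for illustration:

    struct ext { unsigned long long start, len; };

    static void trim_extent(struct ext *e, unsigned long long win_start,
                            unsigned long long win_len)
    {
            unsigned long long win_end = win_start + win_len;
            unsigned long long end = e->start + e->len;

            if (e->start < win_start)
                    e->start = win_start;          /* clip the front */
            if (end > win_end)
                    end = win_end;                 /* clip the tail */
            e->len = end > e->start ? end - e->start : 0;
    }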

View File

@@ -419,7 +419,7 @@ static inline void free_part_info(struct hd_struct *part)
 	kfree(part->info);
 }
-void update_io_ticks(struct hd_struct *part, unsigned long now);
+void update_io_ticks(struct hd_struct *part, unsigned long now, bool end);
 /* block/genhd.c */
 extern void device_add_disk(struct device *parent, struct gendisk *disk,

View File

@@ -281,6 +281,7 @@ struct memstick_host {
 	struct memstick_dev *card;
 	unsigned int retries;
+	bool removing;
 	/* Notify the host that some requests are pending. */
 	void (*request)(struct memstick_host *host);

View File

@@ -2208,7 +2208,7 @@ static inline void zero_resv_unavail(void) {}
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-		enum memmap_context, struct vmem_altmap *);
+		enum meminit_context, struct vmem_altmap *);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);

View File

@@ -822,10 +822,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
 		unsigned int alloc_flags);
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 		unsigned long mark, int classzone_idx);
-enum memmap_context {
-	MEMMAP_EARLY,
-	MEMMAP_HOTPLUG,
+/*
+ * Memory initialization context, use to differentiate memory added by
+ * the platform statically or via memory hotplug interface.
+ */
+enum meminit_context {
+	MEMINIT_EARLY,
+	MEMINIT_HOTPLUG,
 };
 extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
 					unsigned long size);

View File

@@ -99,11 +99,13 @@ extern struct node *node_devices[];
 typedef void (*node_registration_func_t)(struct node *);
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn,
-			     unsigned long end_pfn);
+int link_mem_sections(int nid, unsigned long start_pfn,
+		      unsigned long end_pfn,
+		      enum meminit_context context);
 #else
 static inline int link_mem_sections(int nid, unsigned long start_pfn,
-				    unsigned long end_pfn)
+				    unsigned long end_pfn,
+				    enum meminit_context context)
 {
 	return 0;
 }
@@ -128,7 +130,8 @@ static inline int register_one_node(int nid)
 		if (error)
 			return error;
 		/* link memory sections under this node */
-		error = link_mem_sections(nid, start_pfn, end_pfn);
+		error = link_mem_sections(nid, start_pfn, end_pfn,
+					  MEMINIT_EARLY);
 	}
 	return error;

View File

@@ -150,7 +150,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
 void virtio_transport_destruct(struct vsock_sock *vsk);
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+			       struct virtio_vsock_pkt *pkt);
 void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
 void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
 u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);

View File

@@ -6382,15 +6382,13 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 {
 	int bit;
-	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
-		return;
-
 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
 	if (bit < 0)
 		return;
 	preempt_disable_notrace();
-	op->func(ip, parent_ip, op, regs);
+	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
+		op->func(ip, parent_ip, op, regs);
 	preempt_enable_notrace();
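Editor's note: the ordering is the point of this change. The old early return consulted rcu_is_watching() *before* the recursion counter was taken, so if anything on that path was itself traced, the callback could recurse unprotected. The fix claims the recursion bit first and only then decides whether the callback may run. A condensed sketch of the resulting shape (trace_clear_recursion() is in the surrounding function, not this hunk):

    bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
    if (bit < 0)
            return;                 /* recursion guard is in place first */

    preempt_disable_notrace();
    if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
            op->func(ip, parent_ip, op, regs);
    preempt_enable_notrace();
    trace_clear_recursion(bit);     /* assumed from the surrounding code */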

View File

@@ -3621,7 +3621,7 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
 {
 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
 	const char *space = " ";
-	int prec = tgid ? 10 : 2;
+	int prec = tgid ? 12 : 2;
 	print_event_info(buf, m);

View File

@@ -482,7 +482,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 	trace_find_cmdline(entry->pid, comm);
-	trace_seq_printf(s, "%8.8s-%-5d %3d",
+	trace_seq_printf(s, "%8.8s-%-7d %3d",
 			 comm, entry->pid, cpu);
 	return trace_print_lat_fmt(s, entry);
@@ -573,15 +573,15 @@ int trace_print_context(struct trace_iterator *iter)
 	trace_find_cmdline(entry->pid, comm);
-	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+	trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);
 	if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
 		unsigned int tgid = trace_find_tgid(entry->pid);
 		if (!tgid)
-			trace_seq_printf(s, "(-----) ");
+			trace_seq_printf(s, "(-------) ");
 		else
-			trace_seq_printf(s, "(%5d) ", tgid);
+			trace_seq_printf(s, "(%7d) ", tgid);
 	}
 	trace_seq_printf(s, "[%03d] ", iter->cpu);
@@ -624,7 +624,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
 		trace_find_cmdline(entry->pid, comm);
 		trace_seq_printf(
-			s, "%16s %5d %3d %d %08x %08lx ",
+			s, "%16s %7d %3d %d %08x %08lx ",
 			comm, entry->pid, iter->cpu, entry->flags,
 			entry->preempt_count, iter->idx);
 	} else {
@@ -905,7 +905,7 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
 	S = task_index_to_char(field->prev_state);
 	trace_find_cmdline(field->next_pid, comm);
 	trace_seq_printf(&iter->seq,
-			 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
+			 " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
 			 field->prev_pid,
 			 field->prev_prio,
 			 S, delim,
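Editor's note: all of these format tweaks widen PID columns from 5 to 7 characters. pid_max can be raised as far as PID_MAX_LIMIT (4194304 when CONFIG_BASE_SMALL is unset), which needs seven digits; a narrower field overflows and shifts every later column that tooling parses. A tiny userspace illustration of the breakage:

    #include <stdio.h>

    int main(void)
    {
            /* "%5d" overflows for seven-digit PIDs and misaligns columns */
            printf("%5d|\n", 4194304);   /* field blown: 7 chars in a 5-wide slot */
            printf("%7d|\n", 4194304);   /* fits exactly, columns stay aligned */
            return 0;
    }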

View File

@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
-DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
 /**
  * prandom_u32_state - seeded pseudo-random number generator.

View File

@@ -725,7 +725,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
 	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
-			MEMMAP_HOTPLUG, altmap);
+			MEMINIT_HOTPLUG, altmap);
 	set_zone_contiguous(zone);
 }
@@ -1082,7 +1082,8 @@ int __ref add_memory_resource(int nid, struct resource *res)
 	}
 	/* link memory sections under this node.*/
-	ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1));
+	ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
+				MEMINIT_HOTPLUG);
 	BUG_ON(ret);
 	/* create new memmap entry */

View File

@@ -5875,7 +5875,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 * done. Non-atomic initialization, single-pass.
 */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn, enum memmap_context context,
+		unsigned long start_pfn, enum meminit_context context,
 		struct vmem_altmap *altmap)
 {
 	unsigned long pfn, end_pfn = start_pfn + size;
@@ -5907,7 +5907,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * There can be holes in boot-time mem_map[]s handed to this
 		 * function. They do not exist on hotplugged memory.
 		 */
-		if (context == MEMMAP_EARLY) {
+		if (context == MEMINIT_EARLY) {
 			if (!early_pfn_valid(pfn))
 				continue;
 			if (!early_pfn_in_nid(pfn, nid))
@@ -5920,7 +5920,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		page = pfn_to_page(pfn);
 		__init_single_page(page, pfn, zone, nid);
-		if (context == MEMMAP_HOTPLUG)
+		if (context == MEMINIT_HOTPLUG)
 			__SetPageReserved(page);
 		/*
@@ -6002,7 +6002,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 		 * check here not to call set_pageblock_migratetype() against
 		 * pfn out of zone.
 		 *
-		 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
 		 * because this is done early in section_activate()
 		 */
 		if (!(pfn & (pageblock_nr_pages - 1))) {
@@ -6028,7 +6028,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 void __meminit __weak memmap_init(unsigned long size, int nid,
 				  unsigned long zone, unsigned long start_pfn)
 {
-	memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
+	memmap_init_zone(size, nid, zone, start_pfn, MEMINIT_EARLY, NULL);
 }
 static int zone_batchsize(struct zone *zone)

View File

@@ -419,7 +419,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	else if (status->bw == RATE_INFO_BW_5)
 		channel_flags |= IEEE80211_CHAN_QUARTER;
-	if (status->band == NL80211_BAND_5GHZ)
+	if (status->band == NL80211_BAND_5GHZ ||
+	    status->band == NL80211_BAND_6GHZ)
 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
 	else if (status->encoding != RX_ENC_LEGACY)
 		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;

View File

@@ -168,10 +168,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 	/* take some capabilities as-is */
 	cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
 	vht_cap->cap = cap_info;
-	vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
-			IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
-			IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
-			IEEE80211_VHT_CAP_RXLDPC |
+	vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
 			IEEE80211_VHT_CAP_VHT_TXOP_PS |
 			IEEE80211_VHT_CAP_HTC_VHT |
 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
@@ -180,6 +177,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
 			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+	vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
+			      own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);
+
 	/* and some based on our own capabilities */
 	switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
 	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
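Editor's note: taking min_t() over the masked field works because the two-bit MAX_MPDU encoding is ordered by size (0 = 3895, 1 = 7991, 2 = 11454 octets), so the numerically smaller encoding is also the smaller limit, and the station ends up advertising the minimum of its own and the peer's capability instead of blindly copying the peer's bits. A sketch of the negotiation with the steps spelled out:

    /* two-bit MAX_MPDU field: 0 -> 3895, 1 -> 7991, 2 -> 11454 octets */
    u32 peer_mpdu = cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
    u32 own_mpdu  = own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK;

    /* ordered encoding => min of the raw fields is min of the limits */
    vht_cap->cap |= min_t(u32, peer_mpdu, own_mpdu);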

View File

@@ -1141,6 +1141,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
 	if (!tb[CTA_TUPLE_IP])
 		return -EINVAL;
+	if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
+		return -EOPNOTSUPP;
+
 	tuple->src.l3num = l3num;
 	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);

View File

@@ -86,33 +86,6 @@ out_rcu:
 	return ret;
 }
-static void virtio_transport_loopback_work(struct work_struct *work)
-{
-	struct virtio_vsock *vsock =
-		container_of(work, struct virtio_vsock, loopback_work);
-	LIST_HEAD(pkts);
-	spin_lock_bh(&vsock->loopback_list_lock);
-	list_splice_init(&vsock->loopback_list, &pkts);
-	spin_unlock_bh(&vsock->loopback_list_lock);
-	mutex_lock(&vsock->rx_lock);
-	if (!vsock->rx_run)
-		goto out;
-	while (!list_empty(&pkts)) {
-		struct virtio_vsock_pkt *pkt;
-		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
-		list_del_init(&pkt->list);
-		virtio_transport_recv_pkt(pkt);
-	}
-out:
-	mutex_unlock(&vsock->rx_lock);
-}
 static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
 					      struct virtio_vsock_pkt *pkt)
 {
@@ -370,59 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
 	return val < virtqueue_get_vring_size(vq);
 }
-static void virtio_transport_rx_work(struct work_struct *work)
-{
-	struct virtio_vsock *vsock =
-		container_of(work, struct virtio_vsock, rx_work);
-	struct virtqueue *vq;
-	vq = vsock->vqs[VSOCK_VQ_RX];
-	mutex_lock(&vsock->rx_lock);
-	if (!vsock->rx_run)
-		goto out;
-	do {
-		virtqueue_disable_cb(vq);
-		for (;;) {
-			struct virtio_vsock_pkt *pkt;
-			unsigned int len;
-			if (!virtio_transport_more_replies(vsock)) {
-				/* Stop rx until the device processes already
-				 * pending replies. Leave rx virtqueue
-				 * callbacks disabled.
-				 */
-				goto out;
-			}
-			pkt = virtqueue_get_buf(vq, &len);
-			if (!pkt) {
-				break;
-			}
-			vsock->rx_buf_nr--;
-			/* Drop short/long packets */
-			if (unlikely(len < sizeof(pkt->hdr) ||
-				     len > sizeof(pkt->hdr) + pkt->len)) {
-				virtio_transport_free_pkt(pkt);
-				continue;
-			}
-			pkt->len = len - sizeof(pkt->hdr);
-			virtio_transport_deliver_tap_pkt(pkt);
-			virtio_transport_recv_pkt(pkt);
-		}
-	} while (!virtqueue_enable_cb(vq));
-out:
-	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
-		virtio_vsock_rx_fill(vsock);
-	mutex_unlock(&vsock->rx_lock);
-}
 /* event_lock must be held */
 static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
 				       struct virtio_vsock_event *event)
@@ -586,6 +506,86 @@ static struct virtio_transport virtio_transport = {
 	.send_pkt = virtio_transport_send_pkt,
 };
+static void virtio_transport_loopback_work(struct work_struct *work)
+{
+	struct virtio_vsock *vsock =
+		container_of(work, struct virtio_vsock, loopback_work);
+	LIST_HEAD(pkts);
+	spin_lock_bh(&vsock->loopback_list_lock);
+	list_splice_init(&vsock->loopback_list, &pkts);
+	spin_unlock_bh(&vsock->loopback_list_lock);
+	mutex_lock(&vsock->rx_lock);
+	if (!vsock->rx_run)
+		goto out;
+	while (!list_empty(&pkts)) {
+		struct virtio_vsock_pkt *pkt;
+		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+		list_del_init(&pkt->list);
+		virtio_transport_recv_pkt(&virtio_transport, pkt);
+	}
+out:
+	mutex_unlock(&vsock->rx_lock);
+}
+static void virtio_transport_rx_work(struct work_struct *work)
+{
+	struct virtio_vsock *vsock =
+		container_of(work, struct virtio_vsock, rx_work);
+	struct virtqueue *vq;
+	vq = vsock->vqs[VSOCK_VQ_RX];
+	mutex_lock(&vsock->rx_lock);
+	if (!vsock->rx_run)
+		goto out;
+	do {
+		virtqueue_disable_cb(vq);
+		for (;;) {
+			struct virtio_vsock_pkt *pkt;
+			unsigned int len;
+			if (!virtio_transport_more_replies(vsock)) {
+				/* Stop rx until the device processes already
+				 * pending replies. Leave rx virtqueue
+				 * callbacks disabled.
+				 */
+				goto out;
+			}
+			pkt = virtqueue_get_buf(vq, &len);
+			if (!pkt) {
+				break;
+			}
+			vsock->rx_buf_nr--;
+			/* Drop short/long packets */
+			if (unlikely(len < sizeof(pkt->hdr) ||
+				     len > sizeof(pkt->hdr) + pkt->len)) {
+				virtio_transport_free_pkt(pkt);
+				continue;
+			}
+			pkt->len = len - sizeof(pkt->hdr);
+			virtio_transport_deliver_tap_pkt(pkt);
+			virtio_transport_recv_pkt(&virtio_transport, pkt);
+		}
+	} while (!virtqueue_enable_cb(vq));
+out:
+	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
+		virtio_vsock_rx_fill(vsock);
+	mutex_unlock(&vsock->rx_lock);
+}
 static int virtio_vsock_probe(struct virtio_device *vdev)
 {
 	vq_callback_t *callbacks[] = {

View File

@@ -696,9 +696,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
 /* Normally packets are associated with a socket. There may be no socket if an
  * attempt was made to connect to a socket that does not exist.
  */
-static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+					  struct virtio_vsock_pkt *pkt)
 {
-	const struct virtio_transport *t;
 	struct virtio_vsock_pkt *reply;
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_RST,
@@ -718,7 +718,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
 	if (!reply)
 		return -ENOMEM;
-	t = virtio_transport_get_ops();
 	if (!t) {
 		virtio_transport_free_pkt(reply);
 		return -ENOTCONN;
@@ -1060,7 +1059,8 @@ static bool virtio_transport_space_update(struct sock *sk,
 /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
  * lock.
  */
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+			       struct virtio_vsock_pkt *pkt)
 {
 	struct sockaddr_vm src, dst;
 	struct vsock_sock *vsk;
@@ -1082,7 +1082,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
 					le32_to_cpu(pkt->hdr.fwd_cnt));
 	if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
-		(void)virtio_transport_reset_no_sock(pkt);
+		(void)virtio_transport_reset_no_sock(t, pkt);
 		goto free_pkt;
 	}
@@ -1093,7 +1093,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
 	if (!sk) {
 		sk = vsock_find_bound_socket(&dst);
 		if (!sk) {
-			(void)virtio_transport_reset_no_sock(pkt);
+			(void)virtio_transport_reset_no_sock(t, pkt);
 			goto free_pkt;
 		}
 	}
@@ -1127,6 +1127,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
 		virtio_transport_free_pkt(pkt);
 		break;
 	default:
+		(void)virtio_transport_reset_no_sock(t, pkt);
 		virtio_transport_free_pkt(pkt);
 		break;
 	}
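Editor's note: threading the transport through the receive path is what makes the new `default:` reset possible. Previously virtio_transport_reset_no_sock() had to look the ops up via virtio_transport_get_ops(), which relies on the core's registered transport and is not guaranteed to match (or exist for) a packet that has no associated socket; passing the transport in removes that lookup entirely. Each receive path now simply hands over its own transport, as the two call sites in this series show:

    /* guest rx path (net/vmw_vsock/virtio_transport.c) */
    virtio_transport_recv_pkt(&virtio_transport, pkt);

    /* host rx path (drivers/vhost/vsock.c) */
    virtio_transport_recv_pkt(&vhost_transport, pkt);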

View File

@@ -9,7 +9,7 @@ dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
 dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o
 # Source files need to get at the userspace version of libfdt_env.h to compile
-HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt
+HOST_EXTRACFLAGS += -I $(srctree)/$(src)/libfdt
 ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),)
 ifneq ($(CHECK_DTBS),)

View File

@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s)
 					s->nr_files);
 }
-static int gettid(void)
+static int lk_gettid(void)
 {
 	return syscall(__NR_gettid);
 }
@@ -281,7 +281,7 @@ static void *submitter_fn(void *data)
 	struct io_sq_ring *ring = &s->sq_ring;
 	int ret, prepped;
-	printf("submitter=%d\n", gettid());
+	printf("submitter=%d\n", lk_gettid());
 	srand48_r(pthread_self(), &s->rand);
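Editor's note: the rename avoids a clash with glibc, which started shipping its own gettid() wrapper in version 2.30; defining a conflicting static function breaks the build against newer libcs. A sketch of the portable pattern, assuming only <unistd.h> and <sys/syscall.h>:

    #include <unistd.h>
    #include <sys/syscall.h>

    /* distinct name, so a libc-provided gettid() can never collide */
    static int lk_gettid(void)
    {
            return (int) syscall(__NR_gettid);
    }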

View File

@@ -59,7 +59,7 @@ FEATURE_USER = .libbpf
 FEATURE_TESTS = libelf libelf-mmap bpf reallocarray cxx
 FEATURE_DISPLAY = libelf bpf
-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
+INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
 FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
 check_feat := 1