
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Revert a regression-inducing change to the IPSEC template resolver,
    from Steffen Klassert.

 2) Peeloffs can cause the wrong sk to be woken up in SCTP, fix from
    Xin Long.

 3) Min packet MTU size is wrong in cpsw driver, from Grygorii Strashko.

 4) Fix build failure in netfilter ctnetlink, from Arnd Bergmann.

 5) ISDN hisax driver checks pnp_irq() for errors incorrectly, from
    Arvind Yadav (see the sketch after this list).

 6) Fix fealnx driver build failure on MIPS, from Huacai Chen.

 7) Fix info leak in SCTP: the scope_id of socket addresses is not
    always filled in. From Eric W. Biederman.

 8) Fix MTU inheritance between the physical function and representors
    in the nfp driver, from Dirk van der Merwe.

 9) Fix memory leak in rsi driver, from Colin Ian King.

10) Fix expiration and generation ID handling of cached ipv4 redirect
    routes, from Xin Long.
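
For the pnp_irq() checks in item 5, here is a minimal sketch of the probe pattern the hisax patches below converge on (the function and variable names are illustrative, not taken from any one board driver). pnp_irq() returns -1 when no IRQ resource was assigned, so testing the result with a plain !irq treats that failure value as success; the fix is to compare against -1 instead.

    #include <linux/errno.h>
    #include <linux/pnp.h>

    /* Illustrative only: read the I/O port and IRQ resources and bail out
     * if either is missing, treating -1 (not 0) as the IRQ error value.
     */
    static int example_isapnp_setup(struct pnp_dev *pdev,
                                    unsigned long *io, int *irq)
    {
            *io  = pnp_port_start(pdev, 0);
            *irq = pnp_irq(pdev, 0);
            if (*irq == -1 || !*io) {       /* was: if (!*irq || !*io) */
                    pnp_disable_dev(pdev);
                    return -ENODEV;
            }
            return 0;
    }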

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (40 commits)
  net: usb: hso.c: remove unneeded DRIVER_LICENSE #define
  ibmvnic: fix dma_mapping_error call
  ipvlan: NULL pointer dereference panic in ipvlan_port_destroy
  route: also update fnhe_genid when updating a route cache
  route: update fnhe_expires for redirect when the fnhe exists
  sctp: set frag_point in sctp_setsockopt_maxseg correctly
  rsi: fix memory leak on buf and usb_reg_buf
  net/netlabel: Add list_next_rcu() in rcu_dereference().
  nfp: remove false positive offloads in flower vxlan
  nfp: register flower reprs for egress dev offload
  nfp: inherit the max_mtu from the PF netdev
  nfp: fix vlan receive MAC statistics typo
  nfp: fix flower offload metadata flag usage
  virto_net: remove empty file 'virtio_net.'
  net/sctp: Always set scope_id in sctp_inet6_skb_msgname
  fealnx: Fix building error on MIPS
  isdn: hisax: Fix pnp_irq's error checking for setup_teles3
  isdn: hisax: Fix pnp_irq's error checking for setup_sedlbauer_isapnp
  isdn: hisax: Fix pnp_irq's error checking for setup_niccy
  isdn: hisax: Fix pnp_irq's error checking for setup_ix1micro
  ...
Linus Torvalds 2017-11-17 20:18:37 -08:00
commit 8170024750
49 changed files with 396 additions and 229 deletions

View File

@ -348,7 +348,7 @@ int setup_asuscom(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "AsusPnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);

View File

@ -805,7 +805,7 @@ static int avm_pnp_setup(struct IsdnCardState *cs)
cs->hw.avm.cfg_reg =
pnp_port_start(pnp_avm_d, 0);
cs->irq = pnp_irq(pnp_avm_d, 0);
if (!cs->irq) {
if (cs->irq == -1) {
printk(KERN_ERR "FritzPnP:No IRQ\n");
return (0);
}

View File

@ -1093,7 +1093,7 @@ static int setup_diva_isapnp(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Diva PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);

View File

@ -945,7 +945,7 @@ static int setup_elsa_isapnp(struct IsdnCard *card)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);

View File

@ -1423,7 +1423,7 @@ int setup_hfcsx(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);

View File

@ -196,7 +196,7 @@ int setup_hfcs(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);

View File

@ -940,6 +940,8 @@ static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
}
adapter->io = pnp_port_start(pdev, 0);
adapter->irq = pnp_irq(pdev, 0);
if (!adapter->io || adapter->irq == -1)
goto err_free;
printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at IO %#x irq %d\n",
(char *) dev_id->driver_data, adapter->io, adapter->irq);

View File

@ -238,7 +238,7 @@ int setup_isurf(struct IsdnCard *card)
cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
cs->irq = pnp_irq(pnp_d, 0);
if (!cs->irq || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
if (cs->irq == -1 || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n",
cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem);
pnp_disable_dev(pnp_d);

View File

@ -256,7 +256,7 @@ int setup_ix1micro(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "ITK PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);

View File

@ -261,7 +261,7 @@ int setup_niccy(struct IsdnCard *card)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[2] = pnp_port_start(pnp_d, 1);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1] ||
if (card->para[0] == -1 || !card->para[1] ||
!card->para[2]) {
printk(KERN_ERR "NiccyPnP:some resources are "
"missing %ld/%lx/%lx\n",

View File

@ -558,7 +558,7 @@ static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1]) {
if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);

View File

@ -306,7 +306,7 @@ int setup_teles3(struct IsdnCard *card)
card->para[2] = pnp_port_start(pnp_d, 1);
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
if (!card->para[0] || !card->para[1] || !card->para[2]) {
if (card->para[0] == -1 || !card->para[1] || !card->para[2]) {
printk(KERN_ERR "Teles PnP:some resources are missing %ld/%lx/%lx\n",
card->para[0], card->para[1], card->para[2]);
pnp_disable_dev(pnp_d);

View File

@ -257,8 +257,8 @@ enum rx_desc_status_bits {
RXFSD = 0x00000800, /* first descriptor */
RXLSD = 0x00000400, /* last descriptor */
ErrorSummary = 0x80, /* error summary */
RUNT = 0x40, /* runt packet received */
LONG = 0x20, /* long packet received */
RUNTPKT = 0x40, /* runt packet received */
LONGPKT = 0x20, /* long packet received */
FAE = 0x10, /* frame align error */
CRC = 0x08, /* crc error */
RXER = 0x04, /* receive error */
@ -1628,7 +1628,7 @@ static int netdev_rx(struct net_device *dev)
dev->name, rx_status);
dev->stats.rx_errors++; /* end of a packet. */
if (rx_status & (LONG | RUNT))
if (rx_status & (LONGPKT | RUNTPKT))
dev->stats.rx_length_errors++;
if (rx_status & RXER)
dev->stats.rx_frame_errors++;

View File

@ -849,7 +849,6 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
dma_addr_t dma_addr;
int len = 0;
if (adapter->vpd->buff)
@ -879,7 +878,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
adapter->vpd->dma_addr =
dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
dev_err(dev, "Could not map VPD buffer\n");
kfree(adapter->vpd->buff);
return -ENOMEM;
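
The ibmvnic hunk above drops a stale local dma_addr and checks the handle that dma_map_single() actually stored. A minimal sketch of that pattern, with a generic helper name and parameters assumed purely for illustration (this is not ibmvnic code):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map a driver-owned buffer and validate the handle that was really
     * written back, rather than an unrelated local variable.
     */
    static int example_map_buffer(struct device *dev, void *buf, size_t len,
                                  dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *handle)) {
                    dev_err(dev, "Could not map buffer\n");
                    return -ENOMEM;
            }
            return 0;
    }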

View File

@ -125,6 +125,21 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false);
}
static int
nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
{
return tc_setup_cb_egdev_register(netdev,
nfp_flower_setup_tc_egress_cb,
netdev_priv(netdev));
}
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
netdev_priv(netdev));
}
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@ -452,6 +467,9 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
.repr_init = nfp_flower_repr_netdev_init,
.repr_clean = nfp_flower_repr_netdev_clean,
.repr_open = nfp_flower_repr_netdev_open,
.repr_stop = nfp_flower_repr_netdev_stop,

View File

@ -52,8 +52,7 @@ struct nfp_app;
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
#define NFP_FL_META_FLAG_NEW_MASK 128
#define NFP_FL_META_FLAG_LAST_MASK 1
#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
@ -197,5 +196,7 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
#endif

View File

@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
id = nfp_add_mask_table(app, mask_data, mask_len);
if (id < 0)
return false;
*meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
*mask_id = id;
@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;
if (meta_flags)
*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
nfp_release_mask_id(app, *mask_id);
kfree(mask_entry);
if (meta_flags)
*meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
return true;

View File

@ -131,7 +131,8 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow)
struct tc_cls_flower_offload *flow,
bool egress)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
@ -167,6 +168,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->key);
if (!egress)
return -EOPNOTSUPP;
if (mask_enc_ctl->addr_type != 0xffff ||
enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
return -EOPNOTSUPP;
@ -194,6 +198,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_vxlan);
} else if (egress) {
/* Reject non tunnel matches offloaded to egress repr. */
return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@ -315,7 +322,7 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow)
struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
@ -326,7 +333,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
err = nfp_flower_calculate_key_layers(key_layer, flow);
err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
if (err)
goto err_free_key_ls;
@ -447,7 +454,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flower)
struct tc_cls_flower_offload *flower, bool egress)
{
if (!eth_proto_is_802_3(flower->common.protocol) ||
flower->common.chain_index)
@ -455,7 +462,7 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
return nfp_flower_add_offload(app, netdev, flower);
return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
@ -465,6 +472,23 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct nfp_repr *repr = cb_priv;
if (!tc_can_offload(repr->netdev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
type_data, true);
default:
return -EOPNOTSUPP;
}
}
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
@ -476,7 +500,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
type_data);
type_data, false);
default:
return -EOPNOTSUPP;
}

View File

@ -76,6 +76,8 @@ extern const struct nfp_app_type app_flower;
* @vnic_free: free up app's vNIC state
* @vnic_init: vNIC netdev was registered
* @vnic_clean: vNIC netdev about to be unregistered
* @repr_init: representor about to be registered
* @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
* @start: start application logic
@ -109,6 +111,9 @@ struct nfp_app_type {
int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn);
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
@ -212,6 +217,21 @@ static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr)
return app->type->repr_stop(app, repr);
}
static inline int
nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
{
if (!app->type->repr_init)
return 0;
return app->type->repr_init(app, netdev);
}
static inline void
nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
{
if (app->type->repr_clean)
app->type->repr_clean(app, netdev);
}
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;

View File

@ -104,7 +104,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "rx_frame_too_long_errors",
NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
{ "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
{ "rx_vlan_reveive_ok", NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, },
{ "rx_vlan_received_ok", NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
{ "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, },
{ "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
{ "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, },

View File

@ -258,6 +258,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
static void nfp_repr_clean(struct nfp_repr *repr)
{
unregister_netdev(repr->netdev);
nfp_app_repr_clean(repr->app, repr->netdev);
dst_release((struct dst_entry *)repr->dst);
nfp_port_free(repr->port);
}
@ -297,6 +298,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->netdev_ops = &nfp_repr_netdev_ops;
netdev->ethtool_ops = &nfp_port_ethtool_ops;
netdev->max_mtu = pf_netdev->max_mtu;
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
if (nfp_app_has_tc(app)) {
@ -304,12 +307,18 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->hw_features |= NETIF_F_HW_TC;
}
err = register_netdev(netdev);
err = nfp_app_repr_init(app, netdev);
if (err)
goto err_clean;
err = register_netdev(netdev);
if (err)
goto err_repr_clean;
return 0;
err_repr_clean:
nfp_app_repr_clean(app, netdev);
err_clean:
dst_release((struct dst_entry *)repr->dst);
return err;

View File

@ -157,7 +157,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
/* unused 0x008 */
#define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010)
#define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018)
#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020)
#define NFP_MAC_STATS_RX_VLAN_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x020)
#define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028)
#define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030)
#define NFP_MAC_STATS_RX_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)

View File

@ -1277,11 +1277,10 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC);
if (!dcbx_info)
return NULL;
memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;

View File

@ -119,8 +119,8 @@ do { \
#define CPDMA_RXCP 0x60
#define CPSW_POLL_WEIGHT 64
#define CPSW_MIN_PACKET_SIZE 60
#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100

View File

@ -646,6 +646,10 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_SEND_BUFFER_ID 0
#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
NETIF_F_TSO6)
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
#define VRSS_CHANNEL_DEFAULT 8

View File

@ -2011,7 +2011,7 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
/* hw_features computed in rndis_filter_device_add */
/* hw_features computed in rndis_netdev_set_hwcaps() */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

View File

@ -1131,69 +1131,20 @@ unlock:
rtnl_unlock();
}
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info)
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct netvsc_device *nvdev)
{
struct net_device *net = hv_get_drvdata(dev);
struct net_device *net = rndis_device->ndev;
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
unsigned int gso_max_size = GSO_MAX_SIZE;
u32 mtu, size;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
int i, ret;
rndis_device = get_rndis_device();
if (!rndis_device)
return ERR_PTR(-ENODEV);
/*
* Let the inner driver handle this first to create the netvsc channel
* NOTE! Once the channel is created, we may get a receive callback
* (RndisFilterOnReceive()) before this call is completed
*/
net_device = netvsc_device_add(dev, device_info);
if (IS_ERR(net_device)) {
kfree(rndis_device);
return net_device;
}
/* Initialize the rndis device */
net_device->max_chn = 1;
net_device->num_chn = 1;
net_device->extension = rndis_device;
rndis_device->ndev = net;
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
/* Get the MTU from the host */
size = sizeof(u32);
ret = rndis_filter_query_device(rndis_device, net_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
net->mtu = mtu;
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
int ret;
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps);
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
goto err_dev_remv;
return ret;
/* A value of zero means "no change"; now turn on what we want. */
memset(&offloads, 0, sizeof(struct ndis_offload_params));
@ -1201,8 +1152,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
/* Linux does not care about IP checksum, always does in kernel */
offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
/* Reset previously set hw_features flags */
net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
net_device_ctx->tx_checksum_mask = 0;
/* Compute tx offload settings based on hw capabilities */
net->hw_features = NETIF_F_RXCSUM;
net->hw_features |= NETIF_F_RXCSUM;
if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
/* Can checksum TCP */
@ -1246,10 +1201,75 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
}
}
/* In case some hw_features disappeared we need to remove them from
* net->features list as they're no longer supported.
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
netif_set_gso_max_size(net, gso_max_size);
ret = rndis_filter_set_offload_params(net, net_device, &offloads);
if (ret)
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
return ret;
}
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info)
{
struct net_device *net = hv_get_drvdata(dev);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
int i, ret;
rndis_device = get_rndis_device();
if (!rndis_device)
return ERR_PTR(-ENODEV);
/* Let the inner driver handle this first to create the netvsc channel
* NOTE! Once the channel is created, we may get a receive callback
* (RndisFilterOnReceive()) before this call is completed
*/
net_device = netvsc_device_add(dev, device_info);
if (IS_ERR(net_device)) {
kfree(rndis_device);
return net_device;
}
/* Initialize the rndis device */
net_device->max_chn = 1;
net_device->num_chn = 1;
net_device->extension = rndis_device;
rndis_device->ndev = net;
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
/* Get the MTU from the host */
size = sizeof(u32);
ret = rndis_filter_query_device(rndis_device, net_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
net->mtu = mtu;
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
/* Query and set hardware capabilities */
ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
rndis_filter_query_device_link_status(rndis_device, net_device);

View File

@ -107,16 +107,6 @@ static int ipvlan_port_create(struct net_device *dev)
struct ipvl_port *port;
int err, idx;
if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
netdev_err(dev, "Master is either lo or non-ether device\n");
return -EINVAL;
}
if (netdev_is_rx_handler_busy(dev)) {
netdev_err(dev, "Device is already in use.\n");
return -EBUSY;
}
port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
@ -179,8 +169,9 @@ static void ipvlan_port_destroy(struct net_device *dev)
static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
const struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_port *port = ipvlan->port;
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_port *port;
int err;
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
@ -196,18 +187,27 @@ static int ipvlan_init(struct net_device *dev)
if (!ipvlan->pcpu_stats)
return -ENOMEM;
if (!netif_is_ipvlan_port(phy_dev)) {
err = ipvlan_port_create(phy_dev);
if (err < 0) {
free_percpu(ipvlan->pcpu_stats);
return err;
}
}
port = ipvlan_port_get_rtnl(phy_dev);
port->count += 1;
return 0;
}
static void ipvlan_uninit(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan->port;
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_port *port;
free_percpu(ipvlan->pcpu_stats);
port = ipvlan_port_get_rtnl(phy_dev);
port->count -= 1;
if (!port->count)
ipvlan_port_destroy(port->dev);
@ -554,7 +554,6 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct net_device *phy_dev;
int err;
u16 mode = IPVLAN_MODE_L3;
bool create = false;
if (!tb[IFLA_LINK])
return -EINVAL;
@ -568,28 +567,41 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
phy_dev = tmp->phy_dev;
} else if (!netif_is_ipvlan_port(phy_dev)) {
err = ipvlan_port_create(phy_dev);
if (err < 0)
return err;
create = true;
/* Exit early if the underlying link is invalid or busy */
if (phy_dev->type != ARPHRD_ETHER ||
phy_dev->flags & IFF_LOOPBACK) {
netdev_err(phy_dev,
"Master is either lo or non-ether device\n");
return -EINVAL;
}
if (netdev_is_rx_handler_busy(phy_dev)) {
netdev_err(phy_dev, "Device is already in use.\n");
return -EBUSY;
}
}
if (data && data[IFLA_IPVLAN_MODE])
mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
port = ipvlan_port_get_rtnl(phy_dev);
ipvlan->phy_dev = phy_dev;
ipvlan->dev = dev;
ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
/* Flags are per port and latest update overrides. User has
* to be consistent in setting it just like the mode attribute.
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
* packets.
*/
if (data && data[IFLA_IPVLAN_FLAGS])
ipvlan->port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
dev->priv_flags |= IFF_IPVLAN_SLAVE;
err = register_netdevice(dev);
if (err < 0)
return err;
/* ipvlan_init() would have created the port, if required */
port = ipvlan_port_get_rtnl(phy_dev);
ipvlan->port = port;
/* If the port-id base is at the MAX value, then wrap it around and
* begin from 0x1 again. This may be due to a busy system where lots
@ -609,31 +621,28 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
GFP_KERNEL);
if (err < 0)
goto destroy_ipvlan_port;
goto unregister_netdev;
dev->dev_id = err;
/* Increment id-base to the next slot for the future assignment */
port->dev_id_start = err + 1;
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
* packets.
*/
memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
dev->priv_flags |= IFF_IPVLAN_SLAVE;
err = register_netdevice(dev);
if (err < 0)
err = netdev_upper_dev_link(phy_dev, dev, extack);
if (err)
goto remove_ida;
err = netdev_upper_dev_link(phy_dev, dev, extack);
if (err) {
goto unregister_netdev;
}
/* Flags are per port and latest update overrides. User has
* to be consistent in setting it just like the mode attribute.
*/
if (data && data[IFLA_IPVLAN_FLAGS])
port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
if (data && data[IFLA_IPVLAN_MODE])
mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
err = ipvlan_set_port_mode(port, mode);
if (err) {
if (err)
goto unlink_netdev;
}
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
netif_stacked_transfer_operstate(phy_dev, dev);
@ -641,13 +650,10 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
unlink_netdev:
netdev_upper_dev_unlink(phy_dev, dev);
unregister_netdev:
unregister_netdevice(dev);
remove_ida:
ida_simple_remove(&port->ida, dev->dev_id);
destroy_ipvlan_port:
if (create)
ipvlan_port_destroy(phy_dev);
unregister_netdev:
unregister_netdevice(dev);
return err;
}
EXPORT_SYMBOL_GPL(ipvlan_link_new);

View File

@ -2411,7 +2411,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!hdr)
return -EMSGSIZE;
genl_dump_check_consistent(cb, hdr, &macsec_fam);
genl_dump_check_consistent(cb, hdr);
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;

View File

@ -76,7 +76,6 @@
#define MOD_AUTHOR "Option Wireless"
#define MOD_DESCRIPTION "USB High Speed Option driver"
#define MOD_LICENSE "GPL"
#define HSO_MAX_NET_DEVICES 10
#define HSO__MAX_MTU 2048
@ -3286,7 +3285,7 @@ module_exit(hso_exit);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESCRIPTION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_LICENSE("GPL");
/* change the debug level (eg: insmod hso.ko debug=0x04) */
MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");

View File

@ -2805,7 +2805,7 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
return -EMSGSIZE;
if (cb)
genl_dump_check_consistent(cb, hdr, &hwsim_genl_family);
genl_dump_check_consistent(cb, hdr);
if (data->alpha2[0] && data->alpha2[1])
param.reg_alpha2 = data->alpha2;

View File

@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
u8 *buf;
int status = -ENOMEM;
if (len > RSI_USB_CTRL_BUF_SIZE)
return -EINVAL;
buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
if (!buf)
return status;
if (len > RSI_USB_CTRL_BUF_SIZE)
return -EINVAL;
status = usb_control_msg(usbdev,
usb_rcvctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_READ,
@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
u8 *usb_reg_buf;
int status = -ENOMEM;
if (len > RSI_USB_CTRL_BUF_SIZE)
return -EINVAL;
usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
if (!usb_reg_buf)
return status;
if (len > RSI_USB_CTRL_BUF_SIZE)
return -EINVAL;
usb_reg_buf[0] = (value & 0x00ff);
usb_reg_buf[1] = (value & 0xff00) >> 8;
usb_reg_buf[2] = 0x0;

View File

@ -154,15 +154,12 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
/**
* genlmsg_nlhdr - Obtain netlink header from user specified header
* @user_hdr: user header as returned from genlmsg_put()
* @family: generic netlink family
*
* Returns pointer to netlink header.
*/
static inline struct nlmsghdr *
genlmsg_nlhdr(void *user_hdr, const struct genl_family *family)
static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr)
{
return (struct nlmsghdr *)((char *)user_hdr -
family->hdrsize -
GENL_HDRLEN -
NLMSG_HDRLEN);
}
@ -190,16 +187,14 @@ static inline int genlmsg_parse(const struct nlmsghdr *nlh,
* genl_dump_check_consistent - check if sequence is consistent and advertise if not
* @cb: netlink callback structure that stores the sequence number
* @user_hdr: user header as returned from genlmsg_put()
* @family: generic netlink family
*
* Cf. nl_dump_check_consistent(), this just provides a wrapper to make it
* simpler to use with generic netlink.
*/
static inline void genl_dump_check_consistent(struct netlink_callback *cb,
void *user_hdr,
const struct genl_family *family)
void *user_hdr)
{
nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr, family));
nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr));
}
/**

View File

@ -444,7 +444,8 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
sizeof(struct sctp_data_chunk)));
return frag;
}

View File

@ -26,9 +26,9 @@ struct ipv6_sr_hdr {
__u8 hdrlen;
__u8 type;
__u8 segments_left;
__u8 first_segment;
__u8 first_segment; /* Represents the last_entry field of SRH */
__u8 flags;
__u16 reserved;
__u16 tag;
struct in6_addr segments[0];
};

View File

@ -651,9 +651,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
struct fnhe_hash_bucket *hash;
struct fib_nh_exception *fnhe;
struct rtable *rt;
u32 genid, hval;
unsigned int i;
int depth;
u32 hval = fnhe_hashfun(daddr);
genid = fnhe_genid(dev_net(nh->nh_dev));
hval = fnhe_hashfun(daddr);
spin_lock_bh(&fnhe_lock);
@ -676,12 +679,13 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
}
if (fnhe) {
if (fnhe->fnhe_genid != genid)
fnhe->fnhe_genid = genid;
if (gw)
fnhe->fnhe_gw = gw;
if (pmtu) {
if (pmtu)
fnhe->fnhe_pmtu = pmtu;
fnhe->fnhe_expires = max(1UL, expires);
}
fnhe->fnhe_expires = max(1UL, expires);
/* Update all cached dsts too */
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (rt)
@ -700,7 +704,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
fnhe->fnhe_next = hash->chain;
rcu_assign_pointer(hash->chain, fnhe);
}
fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
fnhe->fnhe_genid = genid;
fnhe->fnhe_daddr = daddr;
fnhe->fnhe_gw = gw;
fnhe->fnhe_pmtu = pmtu;

View File

@ -533,6 +533,7 @@ nla_put_failure:
return -1;
}
#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
const struct nf_conntrack_l3proto *l3proto;
@ -552,6 +553,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
return len + len4;
}
#endif
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{

View File

@ -87,7 +87,7 @@ static inline struct netlbl_af4list *__af4list_valid_rcu(struct list_head *s,
struct list_head *i = s;
struct netlbl_af4list *n = __af4list_entry(s);
while (i != h && !n->valid) {
i = rcu_dereference(i->next);
i = rcu_dereference(list_next_rcu(i));
n = __af4list_entry(i);
}
return n;
@ -154,7 +154,7 @@ static inline struct netlbl_af6list *__af6list_valid_rcu(struct list_head *s,
struct list_head *i = s;
struct netlbl_af6list *n = __af6list_entry(s);
while (i != h && !n->valid) {
i = rcu_dereference(i->next);
i = rcu_dereference(list_next_rcu(i));
n = __af6list_entry(i);
}
return n;

View File

@ -75,7 +75,7 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
if (!hdr)
return -EMSGSIZE;
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
genl_dump_check_consistent(cb, hdr);
if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
@ -603,7 +603,7 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
return -EMSGSIZE;
if (cb)
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
genl_dump_check_consistent(cb, hdr);
if (nfc_genl_setup_device_added(dev, msg))
goto nla_put_failure;
@ -1356,7 +1356,7 @@ static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev,
goto nla_put_failure;
if (cb)
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
genl_dump_check_consistent(cb, hdr);
if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) ||

View File

@ -807,9 +807,10 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_port = sh->source;
addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb);
}
else
addr->v6.sin6_scope_id = 0;
}
*addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);

View File

@ -3594,8 +3594,8 @@ struct sctp_chunk *sctp_make_strreset_req(
__u16 stream_num, __be16 *stream_list,
bool out, bool in)
{
__u16 stream_len = stream_num * sizeof(__u16);
struct sctp_strreset_outreq outreq;
__u16 stream_len = stream_num * 2;
struct sctp_strreset_inreq inreq;
struct sctp_chunk *retval;
__u16 outlen, inlen;

View File

@ -84,8 +84,8 @@
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
size_t msg_len);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len, struct sock **orig_sk);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
@ -1970,9 +1970,16 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
if (!sctp_wspace(asoc)) {
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err)
/* sk can be changed by peel off when waiting for buf. */
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
if (err) {
if (err == -ESRCH) {
/* asoc is already dead. */
new_asoc = NULL;
err = -EPIPE;
}
goto out_free;
}
}
/* If an address is passed with the sendto/sendmsg call, it is used
@ -3133,9 +3140,9 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsign
*/
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_assoc_value params;
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
int val;
if (optlen == sizeof(int)) {
@ -3151,26 +3158,35 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
val = params.assoc_value;
} else
} else {
return -EINVAL;
}
if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
return -EINVAL;
if (val) {
int min_len, max_len;
min_len = SCTP_DEFAULT_MINSEGMENT - sp->pf->af->net_header_len;
min_len -= sizeof(struct sctphdr) +
sizeof(struct sctp_data_chunk);
max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
if (val < min_len || val > max_len)
return -EINVAL;
}
asoc = sctp_id2assoc(sk, params.assoc_id);
if (!asoc && params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
if (val == 0) {
val = asoc->pathmtu;
val -= sp->pf->af->net_header_len;
val = asoc->pathmtu - sp->pf->af->net_header_len;
val -= sizeof(struct sctphdr) +
sizeof(struct sctp_data_chunk);
sizeof(struct sctp_data_chunk);
}
asoc->user_frag = val;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
} else {
if (params.assoc_id && sctp_style(sk, UDP))
return -EINVAL;
sp->user_frag = val;
}
@ -5015,12 +5031,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
if (!asoc)
return -EINVAL;
/* If there is a thread waiting on more sndbuf space for
* sending on this asoc, it cannot be peeled.
*/
if (waitqueue_active(&asoc->wait))
return -EBUSY;
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
@ -7989,7 +7999,7 @@ void sctp_sock_rfree(struct sk_buff *skb)
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
size_t msg_len, struct sock **orig_sk)
{
struct sock *sk = asoc->base.sk;
int err = 0;
@ -8006,10 +8016,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
for (;;) {
prepare_to_wait_exclusive(&asoc->wait, &wait,
TASK_INTERRUPTIBLE);
if (asoc->base.dead)
goto do_dead;
if (!*timeo_p)
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
@ -8022,11 +8033,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
lock_sock(sk);
if (sk != asoc->base.sk) {
release_sock(sk);
sk = asoc->base.sk;
lock_sock(sk);
}
*timeo_p = current_timeo;
}
out:
*orig_sk = sk;
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
@ -8034,6 +8051,10 @@ out:
return err;
do_dead:
err = -ESRCH;
goto out;
do_error:
err = -EPIPE;
goto out;

View File

@ -282,15 +282,31 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
str_nums = params->srs_number_streams;
str_list = params->srs_stream_list;
if (out && str_nums)
for (i = 0; i < str_nums; i++)
if (str_list[i] >= stream->outcnt)
goto out;
if (str_nums) {
int param_len = 0;
if (in && str_nums)
for (i = 0; i < str_nums; i++)
if (str_list[i] >= stream->incnt)
goto out;
if (out) {
for (i = 0; i < str_nums; i++)
if (str_list[i] >= stream->outcnt)
goto out;
param_len = str_nums * sizeof(__u16) +
sizeof(struct sctp_strreset_outreq);
}
if (in) {
for (i = 0; i < str_nums; i++)
if (str_list[i] >= stream->incnt)
goto out;
param_len += str_nums * sizeof(__u16) +
sizeof(struct sctp_strreset_inreq);
}
if (param_len > SCTP_MAX_CHUNK_LEN -
sizeof(struct sctp_reconf_chunk))
goto out;
}
nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
if (!nstr_list) {

View File

@ -174,7 +174,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == LAST_FRAGMENT) {
TIPC_SKB_CB(head)->validated = false;
if (unlikely(!tipc_msg_validate(head)))
if (unlikely(!tipc_msg_validate(&head)))
goto err;
*buf = head;
TIPC_SKB_CB(head)->tail = NULL;
@ -201,11 +201,21 @@ err:
* TIPC will ignore the excess, under the assumption that it is optional info
* introduced by a later release of the protocol.
*/
bool tipc_msg_validate(struct sk_buff *skb)
bool tipc_msg_validate(struct sk_buff **_skb)
{
struct tipc_msg *msg;
struct sk_buff *skb = *_skb;
struct tipc_msg *hdr;
int msz, hsz;
/* Ensure that flow control ratio condition is satisfied */
if (unlikely(skb->truesize / buf_roundup_len(skb) > 4)) {
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
return false;
kfree_skb(*_skb);
*_skb = skb;
}
if (unlikely(TIPC_SKB_CB(skb)->validated))
return true;
if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
@ -217,11 +227,11 @@ bool tipc_msg_validate(struct sk_buff *skb)
if (unlikely(!pskb_may_pull(skb, hsz)))
return false;
msg = buf_msg(skb);
if (unlikely(msg_version(msg) != TIPC_VERSION))
hdr = buf_msg(skb);
if (unlikely(msg_version(hdr) != TIPC_VERSION))
return false;
msz = msg_size(msg);
msz = msg_size(hdr);
if (unlikely(msz < hsz))
return false;
if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
@ -411,7 +421,7 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
skb_pull(*iskb, offset);
imsz = msg_size(buf_msg(*iskb));
skb_trim(*iskb, imsz);
if (unlikely(!tipc_msg_validate(*iskb)))
if (unlikely(!tipc_msg_validate(iskb)))
goto none;
*pos += align(imsz);
return true;

View File

@ -926,7 +926,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
}
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
bool tipc_msg_validate(struct sk_buff *skb);
bool tipc_msg_validate(struct sk_buff **_skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
struct sk_buff_head *xmitq);
@ -954,6 +954,11 @@ static inline u16 buf_seqno(struct sk_buff *skb)
return msg_seqno(buf_msg(skb));
}
static inline int buf_roundup_len(struct sk_buff *skb)
{
return (skb->len / 1024 + 1) * 1024;
}
/* tipc_skb_peek(): peek and reserve first buffer in list
* @list: list to be peeked in
* Returns pointer to first buffer in list, if any

View File

@ -1539,7 +1539,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
__skb_queue_head_init(&xmitq);
/* Ensure message is well-formed before touching the header */
if (unlikely(!tipc_msg_validate(skb)))
if (unlikely(!tipc_msg_validate(&skb)))
goto discard;
hdr = buf_msg(skb);
usr = msg_user(hdr);

View File

@ -6291,7 +6291,7 @@ static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb,
if (!hdr)
return -1;
genl_dump_check_consistent(cb, hdr, &nl80211_fam);
genl_dump_check_consistent(cb, hdr);
if (nl80211_put_regdom(regdom, msg))
goto nla_put_failure;
@ -7722,7 +7722,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
if (!hdr)
return -1;
genl_dump_check_consistent(cb, hdr, &nl80211_fam);
genl_dump_check_consistent(cb, hdr);
if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation))
goto nla_put_failure;

View File

@ -1305,6 +1305,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
newp->xfrm_nr = old->xfrm_nr;
newp->index = old->index;
newp->type = old->type;
newp->family = old->family;
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
@ -1360,29 +1361,36 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
struct net *net = xp_net(policy);
int nx;
int i, error;
xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
xfrm_address_t tmp;
for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
struct xfrm_state *x;
xfrm_address_t *local;
xfrm_address_t *remote;
xfrm_address_t *remote = daddr;
xfrm_address_t *local = saddr;
struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
error = xfrm_get_saddr(net, fl->flowi_oif,
&tmp, remote,
tmpl->encap_family, 0);
if (error)
goto fail;
local = &tmp;
if (tmpl->mode == XFRM_MODE_TUNNEL ||
tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
error = xfrm_get_saddr(net, fl->flowi_oif,
&tmp, remote,
tmpl->encap_family, 0);
if (error)
goto fail;
local = &tmp;
}
}
x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
if (x && x->km.state == XFRM_STATE_VALID) {
xfrm[nx++] = x;
daddr = remote;
saddr = local;
continue;
}
if (x) {