Merge branch 'devlink-resource'

Jiri Pirko says:

====================
devlink: Add support for resource abstraction

Arkadi says:

Many of the ASIC's internal resources are limited and are shared between
several hardware procedures. For example, unified hash-based memory can
be used for many lookup purposes, like FDB and LPM. In many cases the user
can provide a partitioning scheme for such a resource in order to fine-tune
it for their application. In such cases a driver reload is needed for the
changes to take effect, so this patchset also adds support for hot reload.
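
As a concrete illustration, below is a minimal driver-side sketch of the
pattern this series introduces (mirroring the mlxsw changes further down).
All "foo_*" identifiers and sizes are hypothetical and only show how the
new devlink calls fit together:

#include <net/devlink.h>

#define FOO_RESOURCE_KVD	1	/* driver-chosen resource id */

static struct devlink_resource_size_params foo_kvd_size_params = {
	.size_min		= 0,
	.size_max		= 1 << 20,	/* assumed capacity, in entries */
	.size_granularity	= 128,
	.unit			= DEVLINK_RESOURCE_UNIT_ENTRY,
};

static int foo_kvd_size_validate(struct devlink *devlink, u64 size,
				 struct netlink_ext_ack *extack)
{
	/* Accept any size; a real driver would check granularity and
	 * minimums, as mlxsw_sp does below.
	 */
	return 0;
}

static const struct devlink_resource_ops foo_kvd_resource_ops = {
	.size_validate	= foo_kvd_size_validate,
};

/* Describe the partitionable resource to devlink at probe time. */
static int foo_resources_register(struct devlink *devlink)
{
	return devlink_resource_register(devlink, "kvd", true, 1 << 20,
					 FOO_RESOURCE_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &foo_kvd_size_params,
					 &foo_kvd_resource_ops);
}

/* Hot reload: re-initialize the device so a new partitioning requested
 * via DEVLINK_CMD_RESOURCE_SET takes effect. The devlink instance itself
 * is not released during the process.
 */
static int foo_devlink_reload(struct devlink *devlink)
{
	/* tear down and re-init the underlying device here */
	return 0;
}

static const struct devlink_ops foo_devlink_ops = {
	.reload	= foo_devlink_reload,
};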

Such an abstraction can be coupled with devlink's dpipe interface, which
models the ASIC's pipeline as a graph of match/action tables. By modeling
the hardware resource object, and by coupling it to several dpipe tables,
further visibility can be achieved in order to debug ASIC-wide issues.
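
For instance, continuing the hypothetical "foo" driver from the sketch above,
a dpipe table can be tied to the resource it consumes, declaring how many
resource units a single table entry costs:

static struct devlink_dpipe_table_ops foo_host4_ops;	/* match/action ops elided */

static int foo_dpipe_host4_table_init(struct devlink *devlink, void *priv)
{
	int err;

	err = devlink_dpipe_table_register(devlink, "foo_host4",
					   &foo_host4_ops, priv, false);
	if (err)
		return err;

	/* Each "foo_host4" entry consumes one unit of the KVD resource,
	 * so resource usage can be attributed back to this table.
	 */
	err = devlink_dpipe_table_resource_set(devlink, "foo_host4",
					       FOO_RESOURCE_KVD, 1);
	if (err)
		devlink_dpipe_table_unregister(devlink, "foo_host4");
	return err;
}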

The proposed interface gives the user the ability to understand the
limitations of the hardware and to receive notifications regarding its
occupancy. Furthermore, resource occupancy can be monitored in real time,
which is useful in many cases.
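
Sticking with the hypothetical "foo" driver, the occupancy side boils down to
an .occ_get callback on the resource; devlink invokes it when user space dumps
resources and reports the result as DEVLINK_ATTR_RESOURCE_OCC:

struct foo {
	u64 kvd_entries_in_use;		/* updated by the driver's allocator */
};

static u64 foo_kvd_occ_get(struct devlink *devlink)
{
	struct foo *foo = devlink_priv(devlink);

	return foo->kvd_entries_in_use;
}

/* Hooked up by extending the resource ops from the first sketch:
 *	.occ_get = foo_kvd_occ_get,
 */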

---
v2->v3
- Min/Max/Gran attributes.
- Add resource consumption per table.
- Change basic resource unit to 'entry'.
- ABI documentation.

v1->v2
- Add resource size attribute.
- Fix split bug.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-01-16 14:15:36 -05:00
commit fedae6d7e6
12 changed files with 1213 additions and 134 deletions


@ -0,0 +1,33 @@
What: /kvd/
Date: 08-Jan-2018
KernelVersion: v4.16
Contact: mlxsw@mellanox.com
Description: The main database in the Spectrum device is a centralized
KVD database used for many of the tables used to configure
the chip, including the L2 FDB, L3 LPM, ECMP and more. The KVD
is divided into two sections: the first is a hash-based table
and the second is a linear access table. The division
between the linear and hash-based sections is static and
requires a reload before the changes take effect.
What: /kvd/linear
Date: 08-Jan-2018
KernelVersion: v4.16
Contact: mlxsw@mellanox.com
Description: The linear section of the KVD is managed by software as a
flat memory accessed using an index.
What: /kvd/hash_single
Date: 08-Jan-2018
KernelVersion: v4.16
Contact: mlxsw@mellanox.com
Description: The hash-based section of the KVD is managed by the switch
device. Used when the key size is smaller than or equal to
64 bits.
What: /kvd/hash_double
Date: 08-Jan-2018
KernelVersion: v4.16
Contact: mlxsw@mellanox.com
Description: The hash-based section of the KVD is managed by the switch
device. Used when the key is larger than 64 bits.


@ -113,6 +113,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool reload_fail;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
int err;
if (!mlxsw_bus->reset)
return -EOPNOTSUPP;
mlxsw_core_bus_device_unregister(mlxsw_core, true);
mlxsw_bus->reset(mlxsw_core->bus_priv);
err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
devlink);
if (err)
mlxsw_core->reload_fail = true;
return err;
}
static const struct devlink_ops mlxsw_devlink_ops = {
.reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv)
void *bus_priv, bool reload,
struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
struct devlink *devlink;
size_t alloc_size;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
if (!mlxsw_driver)
return -EINVAL;
alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
if (!devlink) {
err = -ENOMEM;
goto err_devlink_alloc;
if (!reload) {
alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
if (!devlink) {
err = -ENOMEM;
goto err_devlink_alloc;
}
}
mlxsw_core = devlink_priv(devlink);
@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_bus_init;
if (mlxsw_driver->resources_register && !reload) {
err = mlxsw_driver->resources_register(mlxsw_core);
if (err)
goto err_register_resources;
}
err = mlxsw_ports_init(mlxsw_core);
if (err)
goto err_ports_init;
@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
err = devlink_register(devlink, mlxsw_bus_info->dev);
if (err)
goto err_devlink_register;
if (!reload) {
err = devlink_register(devlink, mlxsw_bus_info->dev);
if (err)
goto err_devlink_register;
}
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
@ -1057,7 +1090,8 @@ err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
devlink_unregister(devlink);
if (!reload)
devlink_unregister(devlink);
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@ -1067,26 +1101,40 @@ err_alloc_lag_mapping:
err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
devlink_free(devlink);
if (!reload)
devlink_resources_unregister(devlink, NULL);
err_register_resources:
if (!reload)
devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
bool reload)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (mlxsw_core->reload_fail)
goto reload_fail;
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
devlink_unregister(devlink);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core);
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
if (reload)
return;
reload_fail:
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size)
{
struct mlxsw_driver *driver = mlxsw_core->driver;
if (!driver->kvd_sizes_get)
return -EINVAL;
return driver->kvd_sizes_get(mlxsw_core, profile,
p_single_size, p_double_size,
p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
static int __init mlxsw_core_module_init(void)
{
int err;


@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
void *bus_priv, bool reload,
struct devlink *devlink);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
u8 local_port;
@ -308,10 +309,20 @@ struct mlxsw_driver {
u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@ -332,6 +343,7 @@ struct mlxsw_bus {
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res);
void (*fini)(void *bus_priv);
void (*reset)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,


@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->dev = &client->dev;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
&mlxsw_i2c_bus, mlxsw_i2c);
&mlxsw_i2c_bus, mlxsw_i2c, false,
NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
mutex_destroy(&mlxsw_i2c->cmd.lock);
return 0;


@ -154,6 +154,7 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
const struct pci_device_id *id;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
{
u32 single_size, double_size, linear_size;
u64 single_size, double_size, linear_size;
int err;
if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
!MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
!profile->used_kvd_split_data)
return -EIO;
linear_size = profile->kvd_linear_size;
/* The hash part is what is left of the KVD without the
* linear part. It is split into the single size and
* double size by the parts ratio from the profile.
* Both sizes must be multiples of the granularity
* from the profile.
*/
double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
double_size *= profile->kvd_hash_double_parts;
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size /= profile->kvd_hash_granularity;
double_size *= profile->kvd_hash_granularity;
single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
linear_size;
/* Check results are legal. */
if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
return -EIO;
err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
&single_size, &double_size,
&linear_size);
if (err)
return err;
MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->adaptive_routing_group_cap);
}
if (MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
.fini = mlxsw_pci_fini,
.skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
.skb_transmit = mlxsw_pci_skb_transmit,
.cmd_exec = mlxsw_pci_cmd_exec,
.features = MLXSW_BUS_F_TXRX,
};
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
{
@ -1660,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
pci_free_irq_vectors(mlxsw_pci->pdev);
}
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
int err;
err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
if (err < 0)
dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
return err;
}
static void mlxsw_pci_reset(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
mlxsw_pci_free_irq_vectors(mlxsw_pci);
mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
}
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
.fini = mlxsw_pci_fini,
.skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
.skb_transmit = mlxsw_pci_skb_transmit,
.cmd_exec = mlxsw_pci_cmd_exec,
.features = MLXSW_BUS_F_TXRX,
.reset = mlxsw_pci_reset,
};
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@ -1721,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
@ -1730,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci);
&mlxsw_pci_bus, mlxsw_pci, false,
NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
@ -1741,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
pci_free_irq_vectors(mlxsw_pci->pdev);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@ -1760,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
{
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core);
pci_free_irq_vectors(mlxsw_pci->pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);


@ -3991,6 +3991,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
static bool
mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
u64 size)
{
const struct mlxsw_config_profile *profile;
profile = &mlxsw_sp_config_profile;
if (size % profile->kvd_hash_granularity) {
NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
return false;
}
return true;
}
static int
mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
return -EINVAL;
}
static int
mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
struct netlink_ext_ack *extack)
{
if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
return -EINVAL;
return 0;
}
static int
mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
return -EINVAL;
if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
return -EINVAL;
}
return 0;
}
static int
mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
return -EINVAL;
if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
return -EINVAL;
}
return 0;
}
static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
}
static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
.size_validate = mlxsw_sp_resource_kvd_size_validate,
};
static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
.size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
};
static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
.size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
};
static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
};
static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
{
u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
KVD_SINGLE_MIN_SIZE);
u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
KVD_DOUBLE_MIN_SIZE);
u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
u32 linear_size_min = 0;
/* KVD top resource */
mlxsw_sp_kvd_size_params.size_min = kvd_size;
mlxsw_sp_kvd_size_params.size_max = kvd_size;
mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
/* Linear part init */
mlxsw_sp_linear_size_params.size_min = linear_size_min;
mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
double_size_min;
mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
/* Hash double part init */
mlxsw_sp_hash_double_size_params.size_min = double_size_min;
mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
linear_size_min;
mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
/* Hash single part init */
mlxsw_sp_hash_single_size_params.size_min = single_size_min;
mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
linear_size_min;
mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
}
static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
u32 kvd_size, single_size, double_size, linear_size;
const struct mlxsw_config_profile *profile;
int err;
profile = &mlxsw_sp_config_profile;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
return -EIO;
mlxsw_sp_resource_size_params_prepare(mlxsw_core);
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
true, kvd_size,
MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&mlxsw_sp_kvd_size_params,
&mlxsw_sp_resource_kvd_ops);
if (err)
return err;
linear_size = profile->kvd_linear_size;
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
false, linear_size,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD,
&mlxsw_sp_linear_size_params,
&mlxsw_sp_resource_kvd_linear_ops);
if (err)
return err;
double_size = kvd_size - linear_size;
double_size *= profile->kvd_hash_double_parts;
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size = rounddown(double_size, profile->kvd_hash_granularity);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
false, double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
&mlxsw_sp_hash_double_size_params,
&mlxsw_sp_resource_kvd_hash_double_ops);
if (err)
return err;
single_size = kvd_size - double_size - linear_size;
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
false, single_size,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
&mlxsw_sp_hash_single_size_params,
&mlxsw_sp_resource_kvd_hash_single_ops);
if (err)
return err;
return 0;
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
u32 double_size;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
!profile->used_kvd_split_data)
return -EIO;
/* The hash part is what is left of the KVD without the
* linear part. It is split into the single size and
* double size by the parts ratio from the profile.
* Both sizes must be multiples of the granularity
* from the profile. In case the user provided the
* sizes, they are obtained via devlink.
*/
err = devlink_resource_size_get(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR,
p_linear_size);
if (err)
*p_linear_size = profile->kvd_linear_size;
err = devlink_resource_size_get(devlink,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
p_double_size);
if (err) {
double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_linear_size;
double_size *= profile->kvd_hash_double_parts;
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
*p_double_size = rounddown(double_size,
profile->kvd_hash_granularity);
}
err = devlink_resource_size_get(devlink,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
p_single_size);
if (err)
*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_double_size - *p_linear_size;
/* Check results are legal. */
if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
*p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
return -EIO;
return 0;
}
static struct mlxsw_driver mlxsw_sp_driver = {
.kind = mlxsw_sp_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@ -4010,6 +4257,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
};


@ -66,6 +66,18 @@
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
};
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
@ -436,6 +448,7 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;


@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.size_get = mlxsw_sp_dpipe_table_host4_size_get,
};
#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
return devlink_dpipe_table_register(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
&mlxsw_sp_host4_ops,
mlxsw_sp, false);
err = devlink_dpipe_table_register(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
&mlxsw_sp_host4_ops,
mlxsw_sp, false);
if (err)
return err;
err = devlink_dpipe_table_resource_set(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
devlink_dpipe_table_unregister(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
return err;
}
static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.size_get = mlxsw_sp_dpipe_table_host6_size_get,
};
#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
return devlink_dpipe_table_register(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
&mlxsw_sp_host6_ops,
mlxsw_sp, false);
err = devlink_dpipe_table_register(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
&mlxsw_sp_host6_ops,
mlxsw_sp, false);
if (err)
return err;
err = devlink_dpipe_table_resource_set(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
devlink_dpipe_table_unregister(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
return err;
}
static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.size_get = mlxsw_sp_dpipe_table_adj_size_get,
};
#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
return devlink_dpipe_table_register(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
&mlxsw_sp_dpipe_table_adj_ops,
mlxsw_sp, false);
err = devlink_dpipe_table_register(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
&mlxsw_sp_dpipe_table_adj_ops,
mlxsw_sp, false);
if (err)
return err;
err = devlink_dpipe_table_resource_set(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
devlink_dpipe_table_unregister(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
return err;
}
static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)


@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
}
u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
{
unsigned int nr_entries;
int bit = -1;
u64 occ = 0;
nr_entries = (part->info->end_index -
part->info->start_index + 1) /
part->info->alloc_size;
while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
< nr_entries)
occ += part->info->alloc_size;
return occ;
}
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl_part *part;
u64 occ = 0;
list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
occ += mlxsw_sp_kvdl_part_occ(part);
return occ;
}
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;


@ -26,10 +26,12 @@ struct devlink {
struct list_head port_list;
struct list_head sb_list;
struct list_head dpipe_table_list;
struct list_head resource_list;
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
possible_net_t _net;
struct mutex lock;
char priv[0] __aligned(NETDEV_ALIGN);
};
@ -181,6 +183,9 @@ struct devlink_dpipe_table_ops;
* @counters_enabled: indicates if counters are active
* @counter_control_extern: indicates if counter control is in dpipe or
* external tool
* @resource_valid: Indicate that the resource id is valid
* @resource_id: relative resource this table is related to
* @resource_units: number of resource units consumed per table entry
* @table_ops: table operations
* @rcu: rcu
*/
@ -190,6 +195,9 @@ struct devlink_dpipe_table {
const char *name;
bool counters_enabled;
bool counter_control_extern;
bool resource_valid;
u64 resource_id;
u64 resource_units;
struct devlink_dpipe_table_ops *table_ops;
struct rcu_head rcu;
};
@ -223,7 +231,63 @@ struct devlink_dpipe_headers {
unsigned int headers_count;
};
/**
* struct devlink_resource_ops - resource ops
* @occ_get: get the occupied size
* @size_validate: validate the size of the resource before update, reload
* is needed for changes to take place
*/
struct devlink_resource_ops {
u64 (*occ_get)(struct devlink *devlink);
int (*size_validate)(struct devlink *devlink, u64 size,
struct netlink_ext_ack *extack);
};
/**
* struct devlink_resource_size_params - resource's size parameters
* @size_min: minimum size which can be set
* @size_max: maximum size which can be set
* @size_granularity: size granularity
* @size_unit: resource's basic unit
*/
struct devlink_resource_size_params {
u64 size_min;
u64 size_max;
u64 size_granularity;
enum devlink_resource_unit unit;
};
/**
* struct devlink_resource - devlink resource
* @name: name of the resource
* @id: id, per devlink instance
* @size: size of the resource
* @size_new: updated size of the resource, reload is needed
* @size_valid: valid in case the total size of the resource is valid
* including its children
* @parent: parent resource
* @size_params: size parameters
* @list: parent list
* @resource_list: list of child resources
* @resource_ops: resource ops
*/
struct devlink_resource {
const char *name;
u64 id;
u64 size;
u64 size_new;
bool size_valid;
struct devlink_resource *parent;
struct devlink_resource_size_params *size_params;
struct list_head list;
struct list_head resource_list;
const struct devlink_resource_ops *resource_ops;
};
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
struct devlink_ops {
int (*reload)(struct devlink *devlink);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
@ -332,6 +396,23 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
int devlink_resource_register(struct devlink *devlink,
const char *resource_name,
bool top_hierarchy,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
struct devlink_resource_size_params *size_params,
const struct devlink_resource_ops *resource_ops);
void devlink_resources_unregister(struct devlink *devlink,
struct devlink_resource *resource);
int devlink_resource_size_get(struct devlink *devlink,
u64 resource_id,
u64 *p_resource_size);
int devlink_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units);
#else
static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
@ -468,6 +549,40 @@ devlink_dpipe_match_put(struct sk_buff *skb,
return 0;
}
static inline int
devlink_resource_register(struct devlink *devlink,
const char *resource_name,
bool top_hierarchy,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
struct devlink_resource_size_params *size_params,
const struct devlink_resource_ops *resource_ops)
{
return 0;
}
static inline void
devlink_resources_unregister(struct devlink *devlink,
struct devlink_resource *resource)
{
}
static inline int
devlink_resource_size_get(struct devlink *devlink, u64 resource_id,
u64 *p_resource_size)
{
return -EOPNOTSUPP;
}
static inline int
devlink_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units)
{
return -EOPNOTSUPP;
}
#endif
#endif /* _NET_DEVLINK_H_ */


@ -70,6 +70,13 @@ enum devlink_command {
DEVLINK_CMD_DPIPE_ENTRIES_GET,
DEVLINK_CMD_DPIPE_HEADERS_GET,
DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
DEVLINK_CMD_RESOURCE_SET,
DEVLINK_CMD_RESOURCE_DUMP,
/* Hot driver reload, makes configuration changes take place. The
* devlink instance is not released during the process.
*/
DEVLINK_CMD_RELOAD,
/* add new commands above here */
__DEVLINK_CMD_MAX,
@ -202,6 +209,20 @@ enum devlink_attr {
DEVLINK_ATTR_PAD,
DEVLINK_ATTR_ESWITCH_ENCAP_MODE, /* u8 */
DEVLINK_ATTR_RESOURCE_LIST, /* nested */
DEVLINK_ATTR_RESOURCE, /* nested */
DEVLINK_ATTR_RESOURCE_NAME, /* string */
DEVLINK_ATTR_RESOURCE_ID, /* u64 */
DEVLINK_ATTR_RESOURCE_SIZE, /* u64 */
DEVLINK_ATTR_RESOURCE_SIZE_NEW, /* u64 */
DEVLINK_ATTR_RESOURCE_SIZE_VALID, /* u8 */
DEVLINK_ATTR_RESOURCE_SIZE_MIN, /* u64 */
DEVLINK_ATTR_RESOURCE_SIZE_MAX, /* u64 */
DEVLINK_ATTR_RESOURCE_SIZE_GRAN, /* u64 */
DEVLINK_ATTR_RESOURCE_UNIT, /* u8 */
DEVLINK_ATTR_RESOURCE_OCC, /* u64 */
DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */
DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */
/* add new attributes above here, update the policy in devlink.c */
@ -245,4 +266,8 @@ enum devlink_dpipe_header_id {
DEVLINK_DPIPE_HEADER_IPV6,
};
enum devlink_resource_unit {
DEVLINK_RESOURCE_UNIT_ENTRY,
};
#endif /* _UAPI_LINUX_DEVLINK_H_ */


@ -92,12 +92,6 @@ static LIST_HEAD(devlink_list);
*/
static DEFINE_MUTEX(devlink_mutex);
/* devlink_port_mutex
*
* Shared lock to guard lists of ports in all devlink devices.
*/
static DEFINE_MUTEX(devlink_port_mutex);
static struct net *devlink_net(const struct devlink *devlink)
{
return read_pnet(&devlink->_net);
@ -335,15 +329,18 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0)
#define DEVLINK_NL_FLAG_NEED_PORT BIT(1)
#define DEVLINK_NL_FLAG_NEED_SB BIT(2)
#define DEVLINK_NL_FLAG_LOCK_PORTS BIT(3)
/* port is not needed but we need to ensure they don't
* change in the middle of command
*/
/* The per devlink instance lock is taken by default in the pre-doit
* operation, yet several commands do not require this. The global
* devlink lock is taken and protects from disruption by user-calls.
*/
#define DEVLINK_NL_FLAG_NO_LOCK BIT(3)
static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink;
int err;
mutex_lock(&devlink_mutex);
devlink = devlink_get_from_info(info);
@ -351,44 +348,47 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
mutex_unlock(&devlink_mutex);
return PTR_ERR(devlink);
}
if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
mutex_lock(&devlink->lock);
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
info->user_ptr[0] = devlink;
} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
struct devlink_port *devlink_port;
mutex_lock(&devlink_port_mutex);
devlink_port = devlink_port_get_from_info(devlink, info);
if (IS_ERR(devlink_port)) {
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
return PTR_ERR(devlink_port);
err = PTR_ERR(devlink_port);
goto unlock;
}
info->user_ptr[0] = devlink_port;
}
if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) {
mutex_lock(&devlink_port_mutex);
}
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
struct devlink_sb *devlink_sb;
devlink_sb = devlink_sb_get_from_info(devlink, info);
if (IS_ERR(devlink_sb)) {
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
return PTR_ERR(devlink_sb);
err = PTR_ERR(devlink_sb);
goto unlock;
}
info->user_ptr[1] = devlink_sb;
}
return 0;
unlock:
if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
mutex_unlock(&devlink->lock);
mutex_unlock(&devlink_mutex);
return err;
}
static void devlink_nl_post_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT ||
ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
mutex_unlock(&devlink_port_mutex);
struct devlink *devlink;
devlink = devlink_get_from_info(info);
if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
mutex_unlock(&devlink->lock);
mutex_unlock(&devlink_mutex);
}
@ -614,10 +614,10 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
int err;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_port, &devlink->port_list, list) {
if (idx < start) {
idx++;
@ -628,13 +628,15 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err)
if (err) {
mutex_unlock(&devlink->lock);
goto out;
}
idx++;
}
mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
@ -801,6 +803,7 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
if (idx < start) {
idx++;
@ -811,10 +814,13 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err)
if (err) {
mutex_unlock(&devlink->lock);
goto out;
}
idx++;
}
mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_mutex);
@ -935,14 +941,18 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_pool_get)
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP)
if (err && err != -EOPNOTSUPP) {
mutex_unlock(&devlink->lock);
goto out;
}
}
mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_mutex);
@ -1123,22 +1133,24 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
int err;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_port_pool_get)
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_port_pool_get_dumpit(msg, start, &idx,
devlink, devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP)
if (err && err != -EOPNOTSUPP) {
mutex_unlock(&devlink->lock);
goto out;
}
}
mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
@ -1347,23 +1359,26 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
int err;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
continue;
mutex_lock(&devlink->lock);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
devlink,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP)
if (err && err != -EOPNOTSUPP) {
mutex_unlock(&devlink->lock);
goto out;
}
}
mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
@ -1679,6 +1694,12 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
table->counters_enabled))
goto nla_put_failure;
if (table->resource_valid) {
nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
table->resource_id, DEVLINK_ATTR_PAD);
nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
table->resource_units, DEVLINK_ATTR_PAD);
}
if (devlink_dpipe_matches_put(table, skb))
goto nla_put_failure;
@ -2273,6 +2294,272 @@ static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
counters_enable);
}
struct devlink_resource *
devlink_resource_find(struct devlink *devlink,
struct devlink_resource *resource, u64 resource_id)
{
struct list_head *resource_list;
if (resource)
resource_list = &resource->resource_list;
else
resource_list = &devlink->resource_list;
list_for_each_entry(resource, resource_list, list) {
struct devlink_resource *child_resource;
if (resource->id == resource_id)
return resource;
child_resource = devlink_resource_find(devlink, resource,
resource_id);
if (child_resource)
return child_resource;
}
return NULL;
}
void devlink_resource_validate_children(struct devlink_resource *resource)
{
struct devlink_resource *child_resource;
bool size_valid = true;
u64 parts_size = 0;
if (list_empty(&resource->resource_list))
goto out;
list_for_each_entry(child_resource, &resource->resource_list, list)
parts_size += child_resource->size_new;
if (parts_size > resource->size)
size_valid = false;
out:
resource->size_valid = size_valid;
}
static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_resource *resource;
u64 resource_id;
u64 size;
int err;
if (!info->attrs[DEVLINK_ATTR_RESOURCE_ID] ||
!info->attrs[DEVLINK_ATTR_RESOURCE_SIZE])
return -EINVAL;
resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
resource = devlink_resource_find(devlink, NULL, resource_id);
if (!resource)
return -EINVAL;
if (!resource->resource_ops->size_validate)
return -EINVAL;
size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
err = resource->resource_ops->size_validate(devlink, size,
info->extack);
if (err)
return err;
resource->size_new = size;
devlink_resource_validate_children(resource);
if (resource->parent)
devlink_resource_validate_children(resource->parent);
return 0;
}
static void
devlink_resource_size_params_put(struct devlink_resource *resource,
struct sk_buff *skb)
{
struct devlink_resource_size_params *size_params;
size_params = resource->size_params;
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
size_params->size_granularity, DEVLINK_ATTR_PAD);
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
size_params->size_max, DEVLINK_ATTR_PAD);
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
size_params->size_min, DEVLINK_ATTR_PAD);
nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
}
static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
struct devlink_resource *resource)
{
struct devlink_resource *child_resource;
struct nlattr *child_resource_attr;
struct nlattr *resource_attr;
resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE);
if (!resource_attr)
return -EMSGSIZE;
if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
DEVLINK_ATTR_PAD) ||
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
DEVLINK_ATTR_PAD))
goto nla_put_failure;
if (resource->size != resource->size_new)
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
resource->size_new, DEVLINK_ATTR_PAD);
if (resource->resource_ops && resource->resource_ops->occ_get)
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
resource->resource_ops->occ_get(devlink),
DEVLINK_ATTR_PAD);
devlink_resource_size_params_put(resource, skb);
if (list_empty(&resource->resource_list))
goto out;
if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
resource->size_valid))
goto nla_put_failure;
child_resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
if (!child_resource_attr)
goto nla_put_failure;
list_for_each_entry(child_resource, &resource->resource_list, list) {
if (devlink_resource_put(devlink, skb, child_resource))
goto resource_put_failure;
}
nla_nest_end(skb, child_resource_attr);
out:
nla_nest_end(skb, resource_attr);
return 0;
resource_put_failure:
nla_nest_cancel(skb, child_resource_attr);
nla_put_failure:
nla_nest_cancel(skb, resource_attr);
return -EMSGSIZE;
}
static int devlink_resource_fill(struct genl_info *info,
enum devlink_command cmd, int flags)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_resource *resource;
struct nlattr *resources_attr;
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
bool incomplete;
void *hdr;
int i;
int err;
resource = list_first_entry(&devlink->resource_list,
struct devlink_resource, list);
start_again:
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
return err;
hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
&devlink_nl_family, NLM_F_MULTI, cmd);
if (!hdr) {
nlmsg_free(skb);
return -EMSGSIZE;
}
if (devlink_nl_put_handle(skb, devlink))
goto nla_put_failure;
resources_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
if (!resources_attr)
goto nla_put_failure;
incomplete = false;
i = 0;
list_for_each_entry_from(resource, &devlink->resource_list, list) {
err = devlink_resource_put(devlink, skb, resource);
if (err) {
if (!i)
goto err_resource_put;
incomplete = true;
break;
}
i++;
}
nla_nest_end(skb, resources_attr);
genlmsg_end(skb, hdr);
if (incomplete)
goto start_again;
send_done:
nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
NLMSG_DONE, 0, flags | NLM_F_MULTI);
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
goto err_skb_send_alloc;
goto send_done;
}
return genlmsg_reply(skb, info);
nla_put_failure:
err = -EMSGSIZE;
err_resource_put:
err_skb_send_alloc:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
return err;
}
static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
if (list_empty(&devlink->resource_list))
return -EOPNOTSUPP;
return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
}
static int
devlink_resources_validate(struct devlink *devlink,
struct devlink_resource *resource,
struct genl_info *info)
{
struct list_head *resource_list;
int err = 0;
if (resource)
resource_list = &resource->resource_list;
else
resource_list = &devlink->resource_list;
list_for_each_entry(resource, resource_list, list) {
if (!resource->size_valid)
return -EINVAL;
err = devlink_resources_validate(devlink, resource, info);
if (err)
return err;
}
return err;
}
static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
int err;
if (!devlink->ops->reload)
return -EOPNOTSUPP;
err = devlink_resources_validate(devlink, NULL, info);
if (err) {
NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
return err;
}
return devlink->ops->reload(devlink);
}
static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
@ -2291,6 +2578,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
[DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
[DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
};
static const struct genl_ops devlink_nl_ops[] = {
@ -2322,14 +2611,16 @@ static const struct genl_ops devlink_nl_ops[] = {
.doit = devlink_nl_cmd_port_split_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
.doit = devlink_nl_cmd_port_unsplit_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_SB_GET,
@ -2397,8 +2688,7 @@ static const struct genl_ops devlink_nl_ops[] = {
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB |
DEVLINK_NL_FLAG_LOCK_PORTS,
DEVLINK_NL_FLAG_NEED_SB,
},
{
.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
@ -2406,8 +2696,7 @@ static const struct genl_ops devlink_nl_ops[] = {
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB |
DEVLINK_NL_FLAG_LOCK_PORTS,
DEVLINK_NL_FLAG_NEED_SB,
},
{
.cmd = DEVLINK_CMD_ESWITCH_GET,
@ -2451,6 +2740,28 @@ static const struct genl_ops devlink_nl_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_RESOURCE_SET,
.doit = devlink_nl_cmd_resource_set,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_RESOURCE_DUMP,
.doit = devlink_nl_cmd_resource_dump,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_RELOAD,
.doit = devlink_nl_cmd_reload,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NO_LOCK,
},
};
static struct genl_family devlink_nl_family __ro_after_init = {
@ -2488,6 +2799,8 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&devlink->port_list);
INIT_LIST_HEAD(&devlink->sb_list);
INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
INIT_LIST_HEAD(&devlink->resource_list);
mutex_init(&devlink->lock);
return devlink;
}
EXPORT_SYMBOL_GPL(devlink_alloc);
@ -2550,16 +2863,16 @@ int devlink_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index)
{
mutex_lock(&devlink_port_mutex);
mutex_lock(&devlink->lock);
if (devlink_port_index_exists(devlink, port_index)) {
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink->lock);
return -EEXIST;
}
devlink_port->devlink = devlink;
devlink_port->index = port_index;
devlink_port->registered = true;
list_add_tail(&devlink_port->list, &devlink->port_list);
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink->lock);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
return 0;
}
@ -2572,10 +2885,12 @@ EXPORT_SYMBOL_GPL(devlink_port_register);
*/
void devlink_port_unregister(struct devlink_port *devlink_port)
{
struct devlink *devlink = devlink_port->devlink;
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
mutex_lock(&devlink_port_mutex);
mutex_lock(&devlink->lock);
list_del(&devlink_port->list);
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_port_unregister);
@ -2651,7 +2966,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
struct devlink_sb *devlink_sb;
int err = 0;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
if (devlink_sb_index_exists(devlink, sb_index)) {
err = -EEXIST;
goto unlock;
@ -2670,7 +2985,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
devlink_sb->egress_tc_count = egress_tc_count;
list_add_tail(&devlink_sb->list, &devlink->sb_list);
unlock:
mutex_unlock(&devlink_mutex);
mutex_unlock(&devlink->lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_sb_register);
@ -2679,11 +2994,11 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
{
struct devlink_sb *devlink_sb;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
WARN_ON(!devlink_sb);
list_del(&devlink_sb->list);
mutex_unlock(&devlink_mutex);
mutex_unlock(&devlink->lock);
kfree(devlink_sb);
}
EXPORT_SYMBOL_GPL(devlink_sb_unregister);
@ -2699,9 +3014,9 @@ EXPORT_SYMBOL_GPL(devlink_sb_unregister);
int devlink_dpipe_headers_register(struct devlink *devlink,
struct devlink_dpipe_headers *dpipe_headers)
{
mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
devlink->dpipe_headers = dpipe_headers;
mutex_unlock(&devlink_mutex);
mutex_unlock(&devlink->lock);
return 0;
}
EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
@ -2715,9 +3030,9 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
*/
void devlink_dpipe_headers_unregister(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
devlink->dpipe_headers = NULL;
mutex_unlock(&devlink_mutex);
mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_dpipe_headers_unregister);
@ -2783,9 +3098,9 @@ int devlink_dpipe_table_register(struct devlink *devlink,
table->priv = priv;
table->counter_control_extern = counter_control_extern;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
mutex_unlock(&devlink_mutex);
mutex_unlock(&devlink->lock);
return 0;
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
@ -2801,20 +3116,181 @@ void devlink_dpipe_table_unregister(struct devlink *devlink,
{
struct devlink_dpipe_table *table;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
if (!table)
goto unlock;
list_del_rcu(&table->list);
mutex_unlock(&devlink_mutex);
mutex_unlock(&devlink->lock);
kfree_rcu(table, rcu);
return;
unlock:
mutex_unlock(&devlink_mutex);
mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
/**
* devlink_resource_register - devlink resource register
*
* @devlink: devlink
* @resource_name: resource's name
* @top_hierarchy: top hierarchy
* @resource_size: resource's size
* @resource_id: resource's id
* @parent_resource_id: resource's parent id
* @size_params: size parameters
* @resource_ops: resource ops
*/
int devlink_resource_register(struct devlink *devlink,
const char *resource_name,
bool top_hierarchy,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
struct devlink_resource_size_params *size_params,
const struct devlink_resource_ops *resource_ops)
{
struct devlink_resource *resource;
struct list_head *resource_list;
int err = 0;
mutex_lock(&devlink->lock);
resource = devlink_resource_find(devlink, NULL, resource_id);
if (resource) {
err = -EINVAL;
goto out;
}
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource) {
err = -ENOMEM;
goto out;
}
if (top_hierarchy) {
resource_list = &devlink->resource_list;
} else {
struct devlink_resource *parent_resource;
parent_resource = devlink_resource_find(devlink, NULL,
parent_resource_id);
if (parent_resource) {
resource_list = &parent_resource->resource_list;
resource->parent = parent_resource;
} else {
err = -EINVAL;
goto out;
}
}
resource->name = resource_name;
resource->size = resource_size;
resource->size_new = resource_size;
resource->id = resource_id;
resource->resource_ops = resource_ops;
resource->size_valid = true;
resource->size_params = size_params;
INIT_LIST_HEAD(&resource->resource_list);
list_add_tail(&resource->list, resource_list);
out:
mutex_unlock(&devlink->lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_resource_register);
/**
* devlink_resources_unregister - free all resources
*
* @devlink: devlink
* @resource: resource
*/
void devlink_resources_unregister(struct devlink *devlink,
struct devlink_resource *resource)
{
struct devlink_resource *tmp, *child_resource;
struct list_head *resource_list;
if (resource)
resource_list = &resource->resource_list;
else
resource_list = &devlink->resource_list;
if (!resource)
mutex_lock(&devlink->lock);
list_for_each_entry_safe(child_resource, tmp, resource_list, list) {
devlink_resources_unregister(devlink, child_resource);
list_del(&child_resource->list);
kfree(child_resource);
}
if (!resource)
mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_resources_unregister);
/**
* devlink_resource_size_get - get and update size
*
* @devlink: devlink
* @resource_id: the requested resource id
* @p_resource_size: ptr to update
*/
int devlink_resource_size_get(struct devlink *devlink,
u64 resource_id,
u64 *p_resource_size)
{
struct devlink_resource *resource;
int err = 0;
mutex_lock(&devlink->lock);
resource = devlink_resource_find(devlink, NULL, resource_id);
if (!resource) {
err = -EINVAL;
goto out;
}
*p_resource_size = resource->size_new;
resource->size = resource->size_new;
out:
mutex_unlock(&devlink->lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_resource_size_get);
/**
* devlink_dpipe_table_resource_set - set the resource id
*
* @devlink: devlink
* @table_name: table name
* @resource_id: resource id
* @resource_units: number of resource units consumed per table entry
*/
int devlink_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units)
{
struct devlink_dpipe_table *table;
int err = 0;
mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
table_name);
if (!table) {
err = -EINVAL;
goto out;
}
table->resource_id = resource_id;
table->resource_units = resource_units;
table->resource_valid = true;
out:
mutex_unlock(&devlink->lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
static int __init devlink_module_init(void)
{
return genl_register_family(&devlink_nl_family);