You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge branch 'mlxsw-port-mirroring'
Jiri Pirko says:
====================
mlxsw: implement port mirroring offload
This patchset introduces tc matchall classifier and its offload
to Spectrum hardware. In combination with mirred action, defined port mirroring
setup is offloaded by mlxsw/spectrum driver.
The commands used for creating mirror ports:
tc qdisc add dev eth25 handle ffff: ingress
tc filter add dev eth25 parent ffff: \
matchall skip_sw \
action mirred egress mirror \
dev eth27
tc qdisc add dev eth25 handle 1: root prio
tc filter add dev eth25 parent 1: \
matchall skip_sw \
action mirred egress mirror \
dev eth27
These patches contain:
- Resource query implementation
- Hardware port mirroring support for Spectrum.
- Definition of the matchall traffic classifier.
- General support for hw-offloading for that classifier.
- Specific Spectrum implementation for matchall offloading.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
@@ -105,6 +105,7 @@ enum mlxsw_cmd_opcode {
|
||||
MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
|
||||
MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
|
||||
MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
|
||||
MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101,
|
||||
};
|
||||
|
||||
static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
|
||||
@@ -144,6 +145,8 @@ static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
|
||||
return "HW2SW_EQ";
|
||||
case MLXSW_CMD_OPCODE_QUERY_EQ:
|
||||
return "QUERY_EQ";
|
||||
case MLXSW_CMD_OPCODE_QUERY_RESOURCES:
|
||||
return "QUERY_RESOURCES";
|
||||
default:
|
||||
return "*UNKNOWN*";
|
||||
}
|
||||
@@ -500,6 +503,35 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
|
||||
return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
|
||||
}
|
||||
|
||||
/* QUERY_RESOURCES - Query chip resources
|
||||
* --------------------------------------
|
||||
* OpMod == 0 (N/A) , INMmod is index
|
||||
* ----------------------------------
|
||||
* The QUERY_RESOURCES command retrieves information related to chip resources
|
||||
* by resource ID. Every command returns 32 entries. INmod is being used as base.
|
||||
* for example, index 1 will return entries 32-63. When the tables end and there
|
||||
* are no more resources in the table, it will return resource id 0xFFFF to indicate
|
||||
* it.
|
||||
*/
|
||||
static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core,
|
||||
char *out_mbox, int index)
|
||||
{
|
||||
return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES,
|
||||
0, index, false, out_mbox,
|
||||
MLXSW_CMD_MBOX_SIZE);
|
||||
}
|
||||
|
||||
/* cmd_mbox_query_resource_id
|
||||
* The resource id. 0xFFFF indicates table's end.
|
||||
*/
|
||||
MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false);
|
||||
|
||||
/* cmd_mbox_query_resource_data
|
||||
* The resource
|
||||
*/
|
||||
MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data,
|
||||
0x00, 0, 40, 0x8, 0, false);
|
||||
|
||||
/* CONFIG_PROFILE (Set) - Configure Switch Profile
|
||||
* ------------------------------
|
||||
* OpMod == 1 (Set), INMmod == 0 (N/A)
|
||||
|
||||
@@ -111,6 +111,7 @@ struct mlxsw_core {
|
||||
struct {
|
||||
u8 *mapping; /* lag_id+port_index to local_port mapping */
|
||||
} lag;
|
||||
struct mlxsw_resources resources;
|
||||
struct mlxsw_hwmon *hwmon;
|
||||
unsigned long driver_priv[0];
|
||||
/* driver_priv has to be always the last item */
|
||||
@@ -1110,7 +1111,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
|
||||
}
|
||||
}
|
||||
|
||||
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
|
||||
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
|
||||
&mlxsw_core->resources);
|
||||
if (err)
|
||||
goto err_bus_init;
|
||||
|
||||
@@ -1652,6 +1654,12 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
|
||||
}
|
||||
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
|
||||
|
||||
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
|
||||
{
|
||||
return &mlxsw_core->resources;
|
||||
}
|
||||
EXPORT_SYMBOL(mlxsw_core_resources_get);
|
||||
|
||||
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
|
||||
struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
|
||||
struct net_device *dev, bool split, u32 split_group)
|
||||
|
||||
@@ -215,6 +215,7 @@ struct mlxsw_config_profile {
|
||||
u32 kvd_linear_size;
|
||||
u32 kvd_hash_single_size;
|
||||
u32 kvd_hash_double_size;
|
||||
u8 resource_query_enable;
|
||||
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
|
||||
};
|
||||
|
||||
@@ -266,10 +267,18 @@ struct mlxsw_driver {
|
||||
const struct mlxsw_config_profile *profile;
|
||||
};
|
||||
|
||||
struct mlxsw_resources {
|
||||
u8 max_span_valid:1;
|
||||
u8 max_span;
|
||||
};
|
||||
|
||||
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
|
||||
|
||||
struct mlxsw_bus {
|
||||
const char *kind;
|
||||
int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
|
||||
const struct mlxsw_config_profile *profile);
|
||||
const struct mlxsw_config_profile *profile,
|
||||
struct mlxsw_resources *resources);
|
||||
void (*fini)(void *bus_priv);
|
||||
bool (*skb_transmit_busy)(void *bus_priv,
|
||||
const struct mlxsw_tx_info *tx_info);
|
||||
|
||||
@@ -1154,6 +1154,61 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
|
||||
mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
|
||||
}
|
||||
|
||||
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
|
||||
#define MLXSW_MAX_SPAN_ID 0x2420
|
||||
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
|
||||
#define MLXSW_RESOURCES_PER_QUERY 32
|
||||
|
||||
static void mlxsw_pci_resources_query_parse(int id, u64 val,
|
||||
struct mlxsw_resources *resources)
|
||||
{
|
||||
switch (id) {
|
||||
case MLXSW_MAX_SPAN_ID:
|
||||
resources->max_span = val;
|
||||
resources->max_span_valid = 1;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
|
||||
struct mlxsw_resources *resources,
|
||||
u8 query_enabled)
|
||||
{
|
||||
int index, i;
|
||||
u64 data;
|
||||
u16 id;
|
||||
int err;
|
||||
|
||||
/* Not all the versions support resources query */
|
||||
if (!query_enabled)
|
||||
return 0;
|
||||
|
||||
mlxsw_cmd_mbox_zero(mbox);
|
||||
|
||||
for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) {
|
||||
err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) {
|
||||
id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
|
||||
data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
|
||||
|
||||
if (id == MLXSW_RESOURCES_TABLE_END_ID)
|
||||
return 0;
|
||||
|
||||
mlxsw_pci_resources_query_parse(id, data, resources);
|
||||
}
|
||||
}
|
||||
|
||||
/* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
|
||||
* MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
|
||||
*/
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
|
||||
const struct mlxsw_config_profile *profile)
|
||||
{
|
||||
@@ -1404,7 +1459,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
|
||||
}
|
||||
|
||||
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
|
||||
const struct mlxsw_config_profile *profile)
|
||||
const struct mlxsw_config_profile *profile,
|
||||
struct mlxsw_resources *resources)
|
||||
{
|
||||
struct mlxsw_pci *mlxsw_pci = bus_priv;
|
||||
struct pci_dev *pdev = mlxsw_pci->pdev;
|
||||
@@ -1463,6 +1519,11 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
|
||||
if (err)
|
||||
goto err_boardinfo;
|
||||
|
||||
err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources,
|
||||
profile->resource_query_enable);
|
||||
if (err)
|
||||
goto err_query_resources;
|
||||
|
||||
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
|
||||
if (err)
|
||||
goto err_config_profile;
|
||||
@@ -1485,6 +1546,7 @@ err_request_eq_irq:
|
||||
mlxsw_pci_aqs_fini(mlxsw_pci);
|
||||
err_aqs_init:
|
||||
err_config_profile:
|
||||
err_query_resources:
|
||||
err_boardinfo:
|
||||
mlxsw_pci_fw_area_fini(mlxsw_pci);
|
||||
err_fw_area_init:
|
||||
|
||||
@@ -4633,6 +4633,123 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
|
||||
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
|
||||
}
|
||||
|
||||
/* MPAT - Monitoring Port Analyzer Table
|
||||
* -------------------------------------
|
||||
* MPAT Register is used to query and configure the Switch PortAnalyzer Table.
|
||||
* For an enabled analyzer, all fields except e (enable) cannot be modified.
|
||||
*/
|
||||
#define MLXSW_REG_MPAT_ID 0x901A
|
||||
#define MLXSW_REG_MPAT_LEN 0x78
|
||||
|
||||
static const struct mlxsw_reg_info mlxsw_reg_mpat = {
|
||||
.id = MLXSW_REG_MPAT_ID,
|
||||
.len = MLXSW_REG_MPAT_LEN,
|
||||
};
|
||||
|
||||
/* reg_mpat_pa_id
|
||||
* Port Analyzer ID.
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
|
||||
|
||||
/* reg_mpat_system_port
|
||||
* A unique port identifier for the final destination of the packet.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
|
||||
|
||||
/* reg_mpat_e
|
||||
* Enable. Indicating the Port Analyzer is enabled.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
|
||||
|
||||
/* reg_mpat_qos
|
||||
* Quality Of Service Mode.
|
||||
* 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation
|
||||
* PCP, DEI, DSCP or VL) are configured.
|
||||
* 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
|
||||
* same as in the original packet that has triggered the mirroring. For
|
||||
* SPAN also the pcp,dei are maintained.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
|
||||
|
||||
/* reg_mpat_be
|
||||
* Best effort mode. Indicates mirroring traffic should not cause packet
|
||||
* drop or back pressure, but will discard the mirrored packets. Mirrored
|
||||
* packets will be forwarded on a best effort manner.
|
||||
* 0: Do not discard mirrored packets
|
||||
* 1: Discard mirrored packets if causing congestion
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
|
||||
|
||||
static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
|
||||
u16 system_port, bool e)
|
||||
{
|
||||
MLXSW_REG_ZERO(mpat, payload);
|
||||
mlxsw_reg_mpat_pa_id_set(payload, pa_id);
|
||||
mlxsw_reg_mpat_system_port_set(payload, system_port);
|
||||
mlxsw_reg_mpat_e_set(payload, e);
|
||||
mlxsw_reg_mpat_qos_set(payload, 1);
|
||||
mlxsw_reg_mpat_be_set(payload, 1);
|
||||
}
|
||||
|
||||
/* MPAR - Monitoring Port Analyzer Register
|
||||
* ----------------------------------------
|
||||
* MPAR register is used to query and configure the port analyzer port mirroring
|
||||
* properties.
|
||||
*/
|
||||
#define MLXSW_REG_MPAR_ID 0x901B
|
||||
#define MLXSW_REG_MPAR_LEN 0x08
|
||||
|
||||
static const struct mlxsw_reg_info mlxsw_reg_mpar = {
|
||||
.id = MLXSW_REG_MPAR_ID,
|
||||
.len = MLXSW_REG_MPAR_LEN,
|
||||
};
|
||||
|
||||
/* reg_mpar_local_port
|
||||
* The local port to mirror the packets from.
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
|
||||
|
||||
enum mlxsw_reg_mpar_i_e {
|
||||
MLXSW_REG_MPAR_TYPE_EGRESS,
|
||||
MLXSW_REG_MPAR_TYPE_INGRESS,
|
||||
};
|
||||
|
||||
/* reg_mpar_i_e
|
||||
* Ingress/Egress
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4);
|
||||
|
||||
/* reg_mpar_enable
|
||||
* Enable mirroring
|
||||
* By default, port mirroring is disabled for all ports.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
|
||||
|
||||
/* reg_mpar_pa_id
|
||||
* Port Analyzer ID.
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
|
||||
|
||||
static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
|
||||
enum mlxsw_reg_mpar_i_e i_e,
|
||||
bool enable, u8 pa_id)
|
||||
{
|
||||
MLXSW_REG_ZERO(mpar, payload);
|
||||
mlxsw_reg_mpar_local_port_set(payload, local_port);
|
||||
mlxsw_reg_mpar_enable_set(payload, enable);
|
||||
mlxsw_reg_mpar_i_e_set(payload, i_e);
|
||||
mlxsw_reg_mpar_pa_id_set(payload, pa_id);
|
||||
}
|
||||
|
||||
/* MLCR - Management LED Control Register
|
||||
* --------------------------------------
|
||||
* Controls the system LEDs.
|
||||
@@ -5062,6 +5179,45 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
|
||||
mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
|
||||
}
|
||||
|
||||
/* SBIB - Shared Buffer Internal Buffer Register
|
||||
* ---------------------------------------------
|
||||
* The SBIB register configures per port buffers for internal use. The internal
|
||||
* buffers consume memory on the port buffers (note that the port buffers are
|
||||
* used also by PBMC).
|
||||
*
|
||||
* For Spectrum this is used for egress mirroring.
|
||||
*/
|
||||
#define MLXSW_REG_SBIB_ID 0xB006
|
||||
#define MLXSW_REG_SBIB_LEN 0x10
|
||||
|
||||
static const struct mlxsw_reg_info mlxsw_reg_sbib = {
|
||||
.id = MLXSW_REG_SBIB_ID,
|
||||
.len = MLXSW_REG_SBIB_LEN,
|
||||
};
|
||||
|
||||
/* reg_sbib_local_port
|
||||
* Local port number
|
||||
* Not supported for CPU port and router port
|
||||
* Access: Index
|
||||
*/
|
||||
MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
|
||||
|
||||
/* reg_sbib_buff_size
|
||||
* Units represented in cells
|
||||
* Allowed range is 0 to (cap_max_headroom_size - 1)
|
||||
* Default is 0
|
||||
* Access: RW
|
||||
*/
|
||||
MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
|
||||
|
||||
static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
|
||||
u32 buff_size)
|
||||
{
|
||||
MLXSW_REG_ZERO(sbib, payload);
|
||||
mlxsw_reg_sbib_local_port_set(payload, local_port);
|
||||
mlxsw_reg_sbib_buff_size_set(payload, buff_size);
|
||||
}
|
||||
|
||||
static inline const char *mlxsw_reg_id_str(u16 reg_id)
|
||||
{
|
||||
switch (reg_id) {
|
||||
@@ -5165,6 +5321,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
|
||||
return "MFSM";
|
||||
case MLXSW_REG_MTCAP_ID:
|
||||
return "MTCAP";
|
||||
case MLXSW_REG_MPAT_ID:
|
||||
return "MPAT";
|
||||
case MLXSW_REG_MPAR_ID:
|
||||
return "MPAR";
|
||||
case MLXSW_REG_MTMP_ID:
|
||||
return "MTMP";
|
||||
case MLXSW_REG_MLCR_ID:
|
||||
@@ -5179,6 +5339,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
|
||||
return "SBMM";
|
||||
case MLXSW_REG_SBSR_ID:
|
||||
return "SBSR";
|
||||
case MLXSW_REG_SBIB_ID:
|
||||
return "SBIB";
|
||||
default:
|
||||
return "*UNKNOWN*";
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -214,6 +214,43 @@ struct mlxsw_sp_vr {
|
||||
struct mlxsw_sp_fib *fib;
|
||||
};
|
||||
|
||||
enum mlxsw_sp_span_type {
|
||||
MLXSW_SP_SPAN_EGRESS,
|
||||
MLXSW_SP_SPAN_INGRESS
|
||||
};
|
||||
|
||||
struct mlxsw_sp_span_inspected_port {
|
||||
struct list_head list;
|
||||
enum mlxsw_sp_span_type type;
|
||||
u8 local_port;
|
||||
};
|
||||
|
||||
struct mlxsw_sp_span_entry {
|
||||
u8 local_port;
|
||||
bool used;
|
||||
struct list_head bound_ports_list;
|
||||
int ref_count;
|
||||
int id;
|
||||
};
|
||||
|
||||
enum mlxsw_sp_port_mall_action_type {
|
||||
MLXSW_SP_PORT_MALL_MIRROR,
|
||||
};
|
||||
|
||||
struct mlxsw_sp_port_mall_mirror_tc_entry {
|
||||
u8 to_local_port;
|
||||
bool ingress;
|
||||
};
|
||||
|
||||
struct mlxsw_sp_port_mall_tc_entry {
|
||||
struct list_head list;
|
||||
unsigned long cookie;
|
||||
enum mlxsw_sp_port_mall_action_type type;
|
||||
union {
|
||||
struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
|
||||
};
|
||||
};
|
||||
|
||||
struct mlxsw_sp_router {
|
||||
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
|
||||
struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
|
||||
@@ -260,6 +297,11 @@ struct mlxsw_sp {
|
||||
struct {
|
||||
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
|
||||
} kvdl;
|
||||
|
||||
struct {
|
||||
struct mlxsw_sp_span_entry *entries;
|
||||
int entries_count;
|
||||
} span;
|
||||
};
|
||||
|
||||
static inline struct mlxsw_sp_upper *
|
||||
@@ -316,6 +358,8 @@ struct mlxsw_sp_port {
|
||||
unsigned long *untagged_vlans;
|
||||
/* VLAN interfaces */
|
||||
struct list_head vports_list;
|
||||
/* TC handles */
|
||||
struct list_head mall_tc_list;
|
||||
};
|
||||
|
||||
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
|
||||
|
||||
@@ -1541,6 +1541,7 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = {
|
||||
.type = MLXSW_PORT_SWID_TYPE_ETH,
|
||||
}
|
||||
},
|
||||
.resource_query_enable = 0,
|
||||
};
|
||||
|
||||
static struct mlxsw_driver mlxsw_sx_driver = {
|
||||
|
||||
@@ -787,6 +787,7 @@ enum {
|
||||
TC_SETUP_MQPRIO,
|
||||
TC_SETUP_CLSU32,
|
||||
TC_SETUP_CLSFLOWER,
|
||||
TC_SETUP_MATCHALL,
|
||||
};
|
||||
|
||||
struct tc_cls_u32_offload;
|
||||
@@ -797,6 +798,7 @@ struct tc_to_netdev {
|
||||
u8 tc;
|
||||
struct tc_cls_u32_offload *cls_u32;
|
||||
struct tc_cls_flower_offload *cls_flower;
|
||||
struct tc_cls_matchall_offload *cls_mall;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -442,4 +442,15 @@ struct tc_cls_flower_offload {
|
||||
struct tcf_exts *exts;
|
||||
};
|
||||
|
||||
enum tc_matchall_command {
|
||||
TC_CLSMATCHALL_REPLACE,
|
||||
TC_CLSMATCHALL_DESTROY,
|
||||
};
|
||||
|
||||
struct tc_cls_matchall_offload {
|
||||
enum tc_matchall_command command;
|
||||
struct tcf_exts *exts;
|
||||
unsigned long cookie;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -24,6 +24,15 @@ static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool is_tcf_mirred_mirror(const struct tc_action *a)
|
||||
{
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
if (a->ops && a->ops->type == TCA_ACT_MIRRED)
|
||||
return to_mirred(a)->tcfm_eaction == TCA_EGRESS_MIRROR;
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int tcf_mirred_ifindex(const struct tc_action *a)
|
||||
{
|
||||
return to_mirred(a)->tcfm_ifindex;
|
||||
|
||||
@@ -433,6 +433,18 @@ enum {
|
||||
|
||||
#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
|
||||
|
||||
/* Match-all classifier */
|
||||
|
||||
enum {
|
||||
TCA_MATCHALL_UNSPEC,
|
||||
TCA_MATCHALL_CLASSID,
|
||||
TCA_MATCHALL_ACT,
|
||||
TCA_MATCHALL_FLAGS,
|
||||
__TCA_MATCHALL_MAX,
|
||||
};
|
||||
|
||||
#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
|
||||
|
||||
/* Extended Matches */
|
||||
|
||||
struct tcf_ematch_tree_hdr {
|
||||
|
||||
@@ -494,6 +494,16 @@ config NET_CLS_FLOWER
|
||||
To compile this code as a module, choose M here: the module will
|
||||
be called cls_flower.
|
||||
|
||||
config NET_CLS_MATCHALL
|
||||
tristate "Match-all classifier"
|
||||
select NET_CLS
|
||||
---help---
|
||||
If you say Y here, you will be able to classify packets based on
|
||||
nothing. Every packet will match.
|
||||
|
||||
To compile this code as a module, choose M here: the module will
|
||||
be called cls_matchall.
|
||||
|
||||
config NET_EMATCH
|
||||
bool "Extended Matches"
|
||||
select NET_CLS
|
||||
|
||||
@@ -60,6 +60,7 @@ obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o
|
||||
obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o
|
||||
obj-$(CONFIG_NET_CLS_BPF) += cls_bpf.o
|
||||
obj-$(CONFIG_NET_CLS_FLOWER) += cls_flower.o
|
||||
obj-$(CONFIG_NET_CLS_MATCHALL) += cls_matchall.o
|
||||
obj-$(CONFIG_NET_EMATCH) += ematch.o
|
||||
obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o
|
||||
obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
|
||||
|
||||
@@ -0,0 +1,318 @@
|
||||
/*
|
||||
* net/sched/cls_matchll.c Match-all classifier
|
||||
*
|
||||
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <net/sch_generic.h>
|
||||
#include <net/pkt_cls.h>
|
||||
|
||||
struct cls_mall_filter {
|
||||
struct tcf_exts exts;
|
||||
struct tcf_result res;
|
||||
u32 handle;
|
||||
struct rcu_head rcu;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
struct cls_mall_head {
|
||||
struct cls_mall_filter *filter;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
struct tcf_result *res)
|
||||
{
|
||||
struct cls_mall_head *head = rcu_dereference_bh(tp->root);
|
||||
struct cls_mall_filter *f = head->filter;
|
||||
|
||||
if (tc_skip_sw(f->flags))
|
||||
return -1;
|
||||
|
||||
return tcf_exts_exec(skb, &f->exts, res);
|
||||
}
|
||||
|
||||
static int mall_init(struct tcf_proto *tp)
|
||||
{
|
||||
struct cls_mall_head *head;
|
||||
|
||||
head = kzalloc(sizeof(*head), GFP_KERNEL);
|
||||
if (!head)
|
||||
return -ENOBUFS;
|
||||
|
||||
rcu_assign_pointer(tp->root, head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mall_destroy_filter(struct rcu_head *head)
|
||||
{
|
||||
struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
|
||||
|
||||
tcf_exts_destroy(&f->exts);
|
||||
|
||||
kfree(f);
|
||||
}
|
||||
|
||||
static int mall_replace_hw_filter(struct tcf_proto *tp,
|
||||
struct cls_mall_filter *f,
|
||||
unsigned long cookie)
|
||||
{
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
struct tc_to_netdev offload;
|
||||
struct tc_cls_matchall_offload mall_offload = {0};
|
||||
|
||||
offload.type = TC_SETUP_MATCHALL;
|
||||
offload.cls_mall = &mall_offload;
|
||||
offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
|
||||
offload.cls_mall->exts = &f->exts;
|
||||
offload.cls_mall->cookie = cookie;
|
||||
|
||||
return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
|
||||
&offload);
|
||||
}
|
||||
|
||||
static void mall_destroy_hw_filter(struct tcf_proto *tp,
|
||||
struct cls_mall_filter *f,
|
||||
unsigned long cookie)
|
||||
{
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
struct tc_to_netdev offload;
|
||||
struct tc_cls_matchall_offload mall_offload = {0};
|
||||
|
||||
offload.type = TC_SETUP_MATCHALL;
|
||||
offload.cls_mall = &mall_offload;
|
||||
offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
|
||||
offload.cls_mall->exts = NULL;
|
||||
offload.cls_mall->cookie = cookie;
|
||||
|
||||
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
|
||||
&offload);
|
||||
}
|
||||
|
||||
static bool mall_destroy(struct tcf_proto *tp, bool force)
|
||||
{
|
||||
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
struct cls_mall_filter *f = head->filter;
|
||||
|
||||
if (!force && f)
|
||||
return false;
|
||||
|
||||
if (f) {
|
||||
if (tc_should_offload(dev, tp, f->flags))
|
||||
mall_destroy_hw_filter(tp, f, (unsigned long) f);
|
||||
|
||||
call_rcu(&f->rcu, mall_destroy_filter);
|
||||
}
|
||||
RCU_INIT_POINTER(tp->root, NULL);
|
||||
kfree_rcu(head, rcu);
|
||||
return true;
|
||||
}
|
||||
|
||||
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
|
||||
{
|
||||
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
||||
struct cls_mall_filter *f = head->filter;
|
||||
|
||||
if (f && f->handle == handle)
|
||||
return (unsigned long) f;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
|
||||
[TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC },
|
||||
[TCA_MATCHALL_CLASSID] = { .type = NLA_U32 },
|
||||
};
|
||||
|
||||
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
|
||||
struct cls_mall_filter *f,
|
||||
unsigned long base, struct nlattr **tb,
|
||||
struct nlattr *est, bool ovr)
|
||||
{
|
||||
struct tcf_exts e;
|
||||
int err;
|
||||
|
||||
tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
|
||||
err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (tb[TCA_MATCHALL_CLASSID]) {
|
||||
f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
|
||||
tcf_bind_filter(tp, &f->res, base);
|
||||
}
|
||||
|
||||
tcf_exts_change(tp, &f->exts, &e);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mall_change(struct net *net, struct sk_buff *in_skb,
|
||||
struct tcf_proto *tp, unsigned long base,
|
||||
u32 handle, struct nlattr **tca,
|
||||
unsigned long *arg, bool ovr)
|
||||
{
|
||||
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
||||
struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
struct cls_mall_filter *f;
|
||||
struct nlattr *tb[TCA_MATCHALL_MAX + 1];
|
||||
u32 flags = 0;
|
||||
int err;
|
||||
|
||||
if (!tca[TCA_OPTIONS])
|
||||
return -EINVAL;
|
||||
|
||||
if (head->filter)
|
||||
return -EBUSY;
|
||||
|
||||
if (fold)
|
||||
return -EINVAL;
|
||||
|
||||
err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
|
||||
tca[TCA_OPTIONS], mall_policy);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (tb[TCA_MATCHALL_FLAGS]) {
|
||||
flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
|
||||
if (!tc_flags_valid(flags))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
f = kzalloc(sizeof(*f), GFP_KERNEL);
|
||||
if (!f)
|
||||
return -ENOBUFS;
|
||||
|
||||
tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
|
||||
|
||||
if (!handle)
|
||||
handle = 1;
|
||||
f->handle = handle;
|
||||
f->flags = flags;
|
||||
|
||||
err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
|
||||
if (err)
|
||||
goto errout;
|
||||
|
||||
if (tc_should_offload(dev, tp, flags)) {
|
||||
err = mall_replace_hw_filter(tp, f, (unsigned long) f);
|
||||
if (err) {
|
||||
if (tc_skip_sw(flags))
|
||||
goto errout;
|
||||
else
|
||||
err = 0;
|
||||
}
|
||||
}
|
||||
|
||||
*arg = (unsigned long) f;
|
||||
rcu_assign_pointer(head->filter, f);
|
||||
|
||||
return 0;
|
||||
|
||||
errout:
|
||||
kfree(f);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
|
||||
{
|
||||
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
||||
struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
|
||||
struct net_device *dev = tp->q->dev_queue->dev;
|
||||
|
||||
if (tc_should_offload(dev, tp, f->flags))
|
||||
mall_destroy_hw_filter(tp, f, (unsigned long) f);
|
||||
|
||||
RCU_INIT_POINTER(head->filter, NULL);
|
||||
tcf_unbind_filter(tp, &f->res);
|
||||
call_rcu(&f->rcu, mall_destroy_filter);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
|
||||
{
|
||||
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
||||
struct cls_mall_filter *f = head->filter;
|
||||
|
||||
if (arg->count < arg->skip)
|
||||
goto skip;
|
||||
if (arg->fn(tp, (unsigned long) f, arg) < 0)
|
||||
arg->stop = 1;
|
||||
skip:
|
||||
arg->count++;
|
||||
}
|
||||
|
||||
static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
|
||||
struct sk_buff *skb, struct tcmsg *t)
|
||||
{
|
||||
struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
|
||||
struct nlattr *nest;
|
||||
|
||||
if (!f)
|
||||
return skb->len;
|
||||
|
||||
t->tcm_handle = f->handle;
|
||||
|
||||
nest = nla_nest_start(skb, TCA_OPTIONS);
|
||||
if (!nest)
|
||||
goto nla_put_failure;
|
||||
|
||||
if (f->res.classid &&
|
||||
nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (tcf_exts_dump(skb, &f->exts))
|
||||
goto nla_put_failure;
|
||||
|
||||
nla_nest_end(skb, nest);
|
||||
|
||||
if (tcf_exts_dump_stats(skb, &f->exts) < 0)
|
||||
goto nla_put_failure;
|
||||
|
||||
return skb->len;
|
||||
|
||||
nla_put_failure:
|
||||
nla_nest_cancel(skb, nest);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
|
||||
.kind = "matchall",
|
||||
.classify = mall_classify,
|
||||
.init = mall_init,
|
||||
.destroy = mall_destroy,
|
||||
.get = mall_get,
|
||||
.change = mall_change,
|
||||
.delete = mall_delete,
|
||||
.walk = mall_walk,
|
||||
.dump = mall_dump,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init cls_mall_init(void)
|
||||
{
|
||||
return register_tcf_proto_ops(&cls_mall_ops);
|
||||
}
|
||||
|
||||
static void __exit cls_mall_exit(void)
|
||||
{
|
||||
unregister_tcf_proto_ops(&cls_mall_ops);
|
||||
}
|
||||
|
||||
module_init(cls_mall_init);
|
||||
module_exit(cls_mall_exit);
|
||||
|
||||
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
|
||||
MODULE_DESCRIPTION("Match-all classifier");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
Reference in New Issue
Block a user