/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
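
/* Fill the metadata/TCI section of the match key: the key layer bitmap,
 * the mask id and, when the rule matches on VLAN, the TCI. @mask_version
 * selects whether VLAN fields are read from the rule's key or its mask.
 */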
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
                            struct tc_cls_flower_offload *flow, u8 key_type,
                            bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_vlan *flow_vlan;
        u16 tmp_tci;

        memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
        /* Populate the metadata frame. */
        frame->nfp_flow_key_layer = key_type;
        frame->mask_id = ~0;

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                flow_vlan = skb_flow_dissector_target(flow->dissector,
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      target);
                /* Populate the tci field. */
                if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
                        tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                             flow_vlan->vlan_priority) |
                                  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                             flow_vlan->vlan_id) |
                                  NFP_FLOWER_MASK_VLAN_CFI;
                        frame->tci = cpu_to_be16(tmp_tci);
                }
        }
}
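
/* Write the second key-layer bitmap for rules that use extended metadata. */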
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
        frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
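
/* Encode the ingress port match. The mask version is always fully masked;
 * otherwise either the tunnel type or the control-message port id is written.
 */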
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version, enum nfp_flower_tun_type tun_type)
{
        if (mask_version) {
                frame->in_port = cpu_to_be32(~0);
                return 0;
        }

        if (tun_type)
                frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        else
                frame->in_port = cpu_to_be32(cmsg_port);

        return 0;
}
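
/* Fill the MAC/MPLS section: Ethernet addresses and, when matched, the MPLS
 * label stack entry. A bare MPLS ethertype with no MPLS match fields sets
 * only the MPLS_Q bit.
 */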
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                       struct tc_cls_flower_offload *flow,
                       bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_eth_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                 target);
                /* Populate mac frame. */
                ether_addr_copy(frame->mac_dst, &addr->dst[0]);
                ether_addr_copy(frame->mac_src, &addr->src[0]);
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
                struct flow_dissector_key_mpls *mpls;
                u32 t_mpls;

                mpls = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_MPLS,
                                                 target);

                t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
                         NFP_FLOWER_MASK_MPLS_Q;

                frame->mpls_lse = cpu_to_be32(t_mpls);
        } else if (dissector_uses_key(flow->dissector,
                                      FLOW_DISSECTOR_KEY_BASIC)) {
                /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
                 * bit, which indicates an mpls ether type but without any
                 * mpls fields.
                 */
                struct flow_dissector_key_basic *key_basic;

                key_basic = skb_flow_dissector_target(flow->dissector,
                                                      FLOW_DISSECTOR_KEY_BASIC,
                                                      flow->key);
                if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
                    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
                        frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
        }
}
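
/* Fill the layer 4 source and destination port section of the key. */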
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
                         struct tc_cls_flower_offload *flow,
                         bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ports *tp;

        memset(frame, 0, sizeof(struct nfp_flower_tp_ports));

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                tp = skb_flow_dissector_target(flow->dissector,
                                               FLOW_DISSECTOR_KEY_PORTS,
                                               target);
                frame->port_src = tp->src;
                frame->port_dst = tp->dst;
        }
}
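
/* Fill the IP extension fields shared by the IPv4 and IPv6 sections:
 * protocol, TOS/TTL, TCP flags and IP fragmentation flags.
 */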
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
                          struct tc_cls_flower_offload *flow,
                          bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *basic;

                basic = skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  target);
                frame->proto = basic->ip_proto;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_dissector_key_ip *flow_ip;

                flow_ip = skb_flow_dissector_target(flow->dissector,
                                                    FLOW_DISSECTOR_KEY_IP,
                                                    target);
                frame->tos = flow_ip->tos;
                frame->ttl = flow_ip->ttl;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_dissector_key_tcp *tcp;
                u32 tcp_flags;

                tcp = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_TCP, target);
                tcp_flags = be16_to_cpu(tcp->flags);

                if (tcp_flags & TCPHDR_FIN)
                        frame->flags |= NFP_FL_TCP_FLAG_FIN;
                if (tcp_flags & TCPHDR_SYN)
                        frame->flags |= NFP_FL_TCP_FLAG_SYN;
                if (tcp_flags & TCPHDR_RST)
                        frame->flags |= NFP_FL_TCP_FLAG_RST;
                if (tcp_flags & TCPHDR_PSH)
                        frame->flags |= NFP_FL_TCP_FLAG_PSH;
                if (tcp_flags & TCPHDR_URG)
                        frame->flags |= NFP_FL_TCP_FLAG_URG;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key;

                key = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_CONTROL,
                                                target);
                if (key->flags & FLOW_DIS_IS_FRAGMENT)
                        frame->flags |= NFP_FL_IP_FRAGMENTED;
                if (key->flags & FLOW_DIS_FIRST_FRAG)
                        frame->flags |= NFP_FL_IP_FRAG_FIRST;
        }
}
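
/* Fill the IPv4 section: source/destination addresses plus the shared
 * IP extension fields.
 */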
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_ipv4));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                 target);
                frame->ipv4_src = addr->src;
                frame->ipv4_dst = addr->dst;
        }

        nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
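
/* Fill the IPv6 section: source/destination addresses plus the shared
 * IP extension fields.
 */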
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv6_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_ipv6));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                 target);
                frame->ipv6_src = addr->src;
                frame->ipv6_dst = addr->dst;
        }

        nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
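
/* Copy the raw encapsulation (Geneve) option data from the rule into the
 * key buffer.
 */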
static int
nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
                              bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_enc_opts *opts;

        opts = skb_flow_dissector_target(flow->dissector,
                                         FLOW_DISSECTOR_KEY_ENC_OPTS,
                                         target);
        memcpy(key_buf, opts->data, opts->len);

        return 0;
}
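
/* Fill the IPv4 UDP tunnel section: tunnel id (VNI shifted into place),
 * outer IP addresses and outer IP TOS/TTL.
 */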
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
                                struct tc_cls_flower_offload *flow,
                                bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *tun_ips;
        struct flow_dissector_key_keyid *vni;
        struct flow_dissector_key_ip *ip;

        memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                u32 temp_vni;

                vni = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                target);
                temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
                frame->tun_id = cpu_to_be32(temp_vni);
        }

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                tun_ips =
                   skb_flow_dissector_target(flow->dissector,
                                             FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                             target);
                frame->ip_src = tun_ips->src;
                frame->ip_dst = tun_ips->dst;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
                ip = skb_flow_dissector_target(flow->dissector,
                                               FLOW_DISSECTOR_KEY_ENC_IP,
                                               target);
                frame->tos = ip->tos;
                frame->ttl = ip->ttl;
        }
}
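
/* nfp_flower_compile_flow_match() - build the unmasked and mask key blobs.
 *
 * Walks the key layers recorded in @key_ls and emits each section twice,
 * once into the unmasked data and once into the mask data of @nfp_flow,
 * advancing through both buffers section by section. For tunnel matches on
 * an NFP representor the tunnel destination IP is also stored in the rule
 * and handed to the tunnel offload code via nfp_tunnel_add_ipv4_off().
 *
 * Return: 0 on success, or a negative error code.
 */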
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type)
{
        struct nfp_repr *netdev_repr;
        int err;
        u8 *ext;
        u8 *msk;

        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);

        ext = nfp_flow->unmasked_data;
        msk = nfp_flow->mask_data;

        /* Populate Exact Metadata. */
        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
                                    flow, key_ls->key_layer, false);
        /* Populate Mask Metadata. */
        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
                                    flow, key_ls->key_layer, true);
        ext += sizeof(struct nfp_flower_meta_tci);
        msk += sizeof(struct nfp_flower_meta_tci);

        /* Populate Extended Metadata if Required. */
        if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
                                            key_ls->key_layer_two);
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
                                            key_ls->key_layer_two);
                ext += sizeof(struct nfp_flower_ext_meta);
                msk += sizeof(struct nfp_flower_ext_meta);
        }

        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                      nfp_repr_get_port_id(netdev),
                                      false, tun_type);
        if (err)
                return err;

        /* Populate Mask Port Data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
                                      nfp_repr_get_port_id(netdev),
                                      true, tun_type);
        if (err)
                return err;

        ext += sizeof(struct nfp_flower_in_port);
        msk += sizeof(struct nfp_flower_in_port);

        if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
                /* Populate Exact MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
                                       flow, false);
                /* Populate Mask MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
                                       flow, true);
                ext += sizeof(struct nfp_flower_mac_mpls);
                msk += sizeof(struct nfp_flower_mac_mpls);
        }

        if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
                /* Populate Exact TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
                                         flow, false);
                /* Populate Mask TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
                                         flow, true);
                ext += sizeof(struct nfp_flower_tp_ports);
                msk += sizeof(struct nfp_flower_tp_ports);
        }

        if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
                /* Populate Exact IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
                                        flow, false);
                /* Populate Mask IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv4);
                msk += sizeof(struct nfp_flower_ipv4);
        }

        if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
                /* Populate Exact IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
                                        flow, false);
                /* Populate Mask IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv6);
                msk += sizeof(struct nfp_flower_ipv6);
        }

        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                __be32 tun_dst;

                /* Populate Exact VXLAN Data. */
                nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
                /* Populate Mask VXLAN Data. */
                nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
                tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
                ext += sizeof(struct nfp_flower_ipv4_udp_tun);
                msk += sizeof(struct nfp_flower_ipv4_udp_tun);

                /* Configure tunnel end point MAC. */
                if (nfp_netdev_is_nfp_repr(netdev)) {
                        netdev_repr = netdev_priv(netdev);
                        nfp_tunnel_write_macs(netdev_repr->app);

                        /* Store the tunnel destination in the rule data.
                         * This must be present and be an exact match.
                         */
                        nfp_flow->nfp_tun_ipv4_addr = tun_dst;
                        nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
                }

                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
                        err = nfp_flower_compile_geneve_opt(ext, flow, false);
                        if (err)
                                return err;

                        err = nfp_flower_compile_geneve_opt(msk, flow, true);
                        if (err)
                                return err;
                }
        }

        return 0;
}