/* DPDK 21.11.3 -- rte_swx_pipeline_internal.h */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
6 
7 #include <inttypes.h>
8 #include <string.h>
9 #include <sys/queue.h>
10 
11 #include <rte_byteorder.h>
12 #include <rte_common.h>
13 #include <rte_cycles.h>
14 #include <rte_prefetch.h>
15 #include <rte_meter.h>
16 
17 #include <rte_swx_table_selector.h>
18 #include <rte_swx_table_learner.h>
19 #include <rte_swx_pipeline.h>
20 #include <rte_swx_ctl.h>
21 
/* Compile-time trace switch: when TRACE_LEVEL is non-zero, TRACE(...)
 * prints to stdout; otherwise it expands to nothing.
 */
#ifndef TRACE_LEVEL
#define TRACE_LEVEL 0
#endif

#if TRACE_LEVEL
#define TRACE(...) printf(__VA_ARGS__)
#else
#define TRACE(...)
#endif
31 
/*
 * Environment.
 */
/* 64-bit network/host byte order conversion shorthands. */
#define ntoh64(x) rte_be_to_cpu_64(x)
#define hton64(x) rte_cpu_to_be_64(x)
37 
/*
 * Struct.
 */
/* One named field of a struct_type. */
struct field {
	char name[RTE_SWX_NAME_SIZE];
	uint32_t n_bits; /* Field size in bits. */
	uint32_t offset; /* Field offset within the parent struct (presumably in bits -- confirm). */
	int var_size;    /* Non-zero when this field has a variable size. */
};

/* Struct type: an ordered list of fields. Used to describe packet headers,
 * packet meta-data and extern mailboxes.
 */
struct struct_type {
	TAILQ_ENTRY(struct_type) node;
	char name[RTE_SWX_NAME_SIZE];
	struct field *fields;
	uint32_t n_fields;
	uint32_t n_bits;     /* Total struct size in bits. */
	uint32_t n_bits_min; /* Minimum size in bits; relevant for variable-size structs -- confirm. */
	int var_size;        /* Non-zero when the struct contains a variable-size field. */
};

TAILQ_HEAD(struct_type_tailq, struct_type);
59 
/*
 * Input port.
 */
/* Input port type: name plus the port driver operations table. */
struct port_in_type {
	TAILQ_ENTRY(port_in_type) node;
	char name[RTE_SWX_NAME_SIZE];
	struct rte_swx_port_in_ops ops;
};

TAILQ_HEAD(port_in_type_tailq, port_in_type);

/* Input port instance. */
struct port_in {
	TAILQ_ENTRY(port_in) node;
	struct port_in_type *type;
	void *obj; /* Opaque handle owned by the port driver. */
	uint32_t id;
};

TAILQ_HEAD(port_in_tailq, port_in);
79 
80 struct port_in_runtime {
82  void *obj;
83 };
84 
/*
 * Output port.
 */
/* Output port type: name plus the port driver operations table. */
struct port_out_type {
	TAILQ_ENTRY(port_out_type) node;
	char name[RTE_SWX_NAME_SIZE];
	struct rte_swx_port_out_ops ops;
};

TAILQ_HEAD(port_out_type_tailq, port_out_type);

/* Output port instance. */
struct port_out {
	TAILQ_ENTRY(port_out) node;
	struct port_out_type *type;
	void *obj; /* Opaque handle owned by the port driver. */
	uint32_t id;
};

TAILQ_HEAD(port_out_tailq, port_out);
104 
105 struct port_out_runtime {
108  void *obj;
109 };
110 
111 /*
112  * Extern object.
113  */
114 struct extern_type_member_func {
115  TAILQ_ENTRY(extern_type_member_func) node;
116  char name[RTE_SWX_NAME_SIZE];
118  uint32_t id;
119 };
120 
121 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
122 
123 struct extern_type {
124  TAILQ_ENTRY(extern_type) node;
125  char name[RTE_SWX_NAME_SIZE];
126  struct struct_type *mailbox_struct_type;
129  struct extern_type_member_func_tailq funcs;
130  uint32_t n_funcs;
131 };
132 
133 TAILQ_HEAD(extern_type_tailq, extern_type);
134 
/* Extern object instance. */
struct extern_obj {
	TAILQ_ENTRY(extern_obj) node;
	char name[RTE_SWX_NAME_SIZE];
	struct extern_type *type;
	void *obj;          /* Opaque object instance handle -- presumably created by the type; confirm. */
	uint32_t struct_id; /* ID of the mailbox struct within the per-thread structs[] array. */
	uint32_t id;
};

TAILQ_HEAD(extern_obj_tailq, extern_obj);

/* Maximum number of member functions per extern object type. */
#ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
#define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
#endif

/* Per-object run-time data: handle, mailbox and a flat member function table
 * indexed by function ID for fast dispatch.
 */
struct extern_obj_runtime {
	void *obj;
	uint8_t *mailbox;
	rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
};
155 
156 /*
157  * Extern function.
158  */
159 struct extern_func {
160  TAILQ_ENTRY(extern_func) node;
161  char name[RTE_SWX_NAME_SIZE];
162  struct struct_type *mailbox_struct_type;
164  uint32_t struct_id;
165  uint32_t id;
166 };
167 
168 TAILQ_HEAD(extern_func_tailq, extern_func);
169 
170 struct extern_func_runtime {
171  uint8_t *mailbox;
173 };
174 
/*
 * Header.
 */
/* Packet header declaration: name plus its struct type. */
struct header {
	TAILQ_ENTRY(header) node;
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *st;
	uint32_t struct_id; /* ID of this header within the per-thread structs[] array. */
	uint32_t id;
};

TAILQ_HEAD(header_tailq, header);

/* Run-time state of an extracted or generated header. */
struct header_runtime {
	uint8_t *ptr0;    /* Header storage base address. */
	uint32_t n_bytes; /* Current header size in bytes. */
};

/* Run-time state of a header queued for emission. */
struct header_out_runtime {
	uint8_t *ptr0;    /* Storage base address. */
	uint8_t *ptr;     /* Current position within the storage -- confirm. */
	uint32_t n_bytes;
};
198 
/*
 * Instruction.
 */

/* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
 * when transferred to packet meta-data and in NBO when transferred to packet
 * headers.
 */

/* Notation conventions:
 * -Header field: H = h.header.field (dst/src)
 * -Meta-data field: M = m.field (dst/src)
 * -Extern object mailbox field: E = e.field (dst/src)
 * -Extern function mailbox field: F = f.field (dst/src)
 * -Table action data field: T = t.field (src only)
 * -Immediate value: I = 32-bit unsigned value (src only)
 */
218 
/* Instruction opcodes. Most instructions come in several variants, one per
 * combination of operand categories (see the notation conventions above), so
 * the interpreter can dispatch to a specialized handler for each case.
 */
enum instruction_type {
	/* rx m.port_in */
	INSTR_RX,

	/* tx port_out
	 * port_out = MI
	 */
	INSTR_TX, /* port_out = M */
	INSTR_TX_I, /* port_out = I */

	/* extract h.header */
	INSTR_HDR_EXTRACT,
	INSTR_HDR_EXTRACT2,
	INSTR_HDR_EXTRACT3,
	INSTR_HDR_EXTRACT4,
	INSTR_HDR_EXTRACT5,
	INSTR_HDR_EXTRACT6,
	INSTR_HDR_EXTRACT7,
	INSTR_HDR_EXTRACT8,

	/* extract h.header m.last_field_size */
	INSTR_HDR_EXTRACT_M,

	/* lookahead h.header */
	INSTR_HDR_LOOKAHEAD,

	/* emit h.header */
	INSTR_HDR_EMIT,
	INSTR_HDR_EMIT_TX,
	INSTR_HDR_EMIT2_TX,
	INSTR_HDR_EMIT3_TX,
	INSTR_HDR_EMIT4_TX,
	INSTR_HDR_EMIT5_TX,
	INSTR_HDR_EMIT6_TX,
	INSTR_HDR_EMIT7_TX,
	INSTR_HDR_EMIT8_TX,

	/* validate h.header */
	INSTR_HDR_VALIDATE,

	/* invalidate h.header */
	INSTR_HDR_INVALIDATE,

	/* mov dst src
	 * dst = src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_MOV, /* dst = MEF, src = MEFT */
	INSTR_MOV_MH, /* dst = MEF, src = H */
	INSTR_MOV_HM, /* dst = H, src = MEFT */
	INSTR_MOV_HH, /* dst = H, src = H */
	INSTR_MOV_I, /* dst = HMEF, src = I */

	/* dma h.header t.field
	 * memcpy(h.header, t.field, sizeof(h.header))
	 */
	INSTR_DMA_HT,
	INSTR_DMA_HT2,
	INSTR_DMA_HT3,
	INSTR_DMA_HT4,
	INSTR_DMA_HT5,
	INSTR_DMA_HT6,
	INSTR_DMA_HT7,
	INSTR_DMA_HT8,

	/* add dst src
	 * dst += src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_ADD, /* dst = MEF, src = MEF */
	INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
	INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
	INSTR_ALU_ADD_HH, /* dst = H, src = H */
	INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
	INSTR_ALU_ADD_HI, /* dst = H, src = I */

	/* sub dst src
	 * dst -= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SUB, /* dst = MEF, src = MEF */
	INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
	INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
	INSTR_ALU_SUB_HH, /* dst = H, src = H */
	INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
	INSTR_ALU_SUB_HI, /* dst = H, src = I */

	/* ckadd dst src
	 * dst = dst '+ src[0:1] '+ src[2:3] + ...
	 * dst = H, src = {H, h.header}
	 */
	INSTR_ALU_CKADD_FIELD, /* src = H */
	INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
	INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */

	/* cksub dst src
	 * dst = dst '- src
	 * dst = H, src = H
	 */
	INSTR_ALU_CKSUB_FIELD,

	/* and dst src
	 * dst &= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_AND, /* dst = MEF, src = MEFT */
	INSTR_ALU_AND_MH, /* dst = MEF, src = H */
	INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
	INSTR_ALU_AND_HH, /* dst = H, src = H */
	INSTR_ALU_AND_I, /* dst = HMEF, src = I */

	/* or dst src
	 * dst |= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_OR, /* dst = MEF, src = MEFT */
	INSTR_ALU_OR_MH, /* dst = MEF, src = H */
	INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
	INSTR_ALU_OR_HH, /* dst = H, src = H */
	INSTR_ALU_OR_I, /* dst = HMEF, src = I */

	/* xor dst src
	 * dst ^= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
	INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
	INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
	INSTR_ALU_XOR_HH, /* dst = H, src = H */
	INSTR_ALU_XOR_I, /* dst = HMEF, src = I */

	/* shl dst src
	 * dst <<= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHL, /* dst = MEF, src = MEF */
	INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHL_HH, /* dst = H, src = H */
	INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHL_HI, /* dst = H, src = I */

	/* shr dst src
	 * dst >>= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHR, /* dst = MEF, src = MEF */
	INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHR_HH, /* dst = H, src = H */
	INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHR_HI, /* dst = H, src = I */

	/* regprefetch REGARRAY index
	 * prefetch REGARRAY[index]
	 * index = HMEFTI
	 */
	INSTR_REGPREFETCH_RH, /* index = H */
	INSTR_REGPREFETCH_RM, /* index = MEFT */
	INSTR_REGPREFETCH_RI, /* index = I */

	/* regrd dst REGARRAY index
	 * dst = REGARRAY[index]
	 * dst = HMEF, index = HMEFTI
	 */
	INSTR_REGRD_HRH, /* dst = H, index = H */
	INSTR_REGRD_HRM, /* dst = H, index = MEFT */
	INSTR_REGRD_HRI, /* dst = H, index = I */
	INSTR_REGRD_MRH, /* dst = MEF, index = H */
	INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
	INSTR_REGRD_MRI, /* dst = MEF, index = I */

	/* regwr REGARRAY index src
	 * REGARRAY[index] = src
	 * index = HMEFTI, src = HMEFTI
	 */
	INSTR_REGWR_RHH, /* index = H, src = H */
	INSTR_REGWR_RHM, /* index = H, src = MEFT */
	INSTR_REGWR_RHI, /* index = H, src = I */
	INSTR_REGWR_RMH, /* index = MEFT, src = H */
	INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
	INSTR_REGWR_RMI, /* index = MEFT, src = I */
	INSTR_REGWR_RIH, /* index = I, src = H */
	INSTR_REGWR_RIM, /* index = I, src = MEFT */
	INSTR_REGWR_RII, /* index = I, src = I */

	/* regadd REGARRAY index src
	 * REGARRAY[index] += src
	 * index = HMEFTI, src = HMEFTI
	 */
	INSTR_REGADD_RHH, /* index = H, src = H */
	INSTR_REGADD_RHM, /* index = H, src = MEFT */
	INSTR_REGADD_RHI, /* index = H, src = I */
	INSTR_REGADD_RMH, /* index = MEFT, src = H */
	INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
	INSTR_REGADD_RMI, /* index = MEFT, src = I */
	INSTR_REGADD_RIH, /* index = I, src = H */
	INSTR_REGADD_RIM, /* index = I, src = MEFT */
	INSTR_REGADD_RII, /* index = I, src = I */

	/* metprefetch METARRAY index
	 * prefetch METARRAY[index]
	 * index = HMEFTI
	 */
	INSTR_METPREFETCH_H, /* index = H */
	INSTR_METPREFETCH_M, /* index = MEFT */
	INSTR_METPREFETCH_I, /* index = I */

	/* meter METARRAY index length color_in color_out
	 * color_out = meter(METARRAY[index], length, color_in)
	 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
	 */
	INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
	INSTR_METER_HHI, /* index = H, length = H, color_in = I */
	INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
	INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
	INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
	INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
	INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
	INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
	INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
	INSTR_METER_IHI, /* index = I, length = H, color_in = I */
	INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
	INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */

	/* table TABLE */
	INSTR_TABLE,
	INSTR_TABLE_AF,
	INSTR_SELECTOR,
	INSTR_LEARNER,
	INSTR_LEARNER_AF,

	/* learn LEARNER ACTION_NAME [ m.action_first_arg ] */
	INSTR_LEARNER_LEARN,

	/* forget */
	INSTR_LEARNER_FORGET,

	/* extern e.obj.func */
	INSTR_EXTERN_OBJ,

	/* extern f.func */
	INSTR_EXTERN_FUNC,

	/* jmp LABEL
	 * Unconditional jump
	 */
	INSTR_JMP,

	/* jmpv LABEL h.header
	 * Jump if header is valid
	 */
	INSTR_JMP_VALID,

	/* jmpnv LABEL h.header
	 * Jump if header is invalid
	 */
	INSTR_JMP_INVALID,

	/* jmph LABEL
	 * Jump if table lookup hit
	 */
	INSTR_JMP_HIT,

	/* jmpnh LABEL
	 * Jump if table lookup miss
	 */
	INSTR_JMP_MISS,

	/* jmpa LABEL ACTION
	 * Jump if action run
	 */
	INSTR_JMP_ACTION_HIT,

	/* jmpna LABEL ACTION
	 * Jump if action not run
	 */
	INSTR_JMP_ACTION_MISS,

	/* jmpeq LABEL a b
	 * Jump if a is equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
	INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
	INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
	INSTR_JMP_EQ_HH, /* a = H, b = H */
	INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmpneq LABEL a b
	 * Jump if a is not equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
	INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
	INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
	INSTR_JMP_NEQ_HH, /* a = H, b = H */
	INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmplt LABEL a b
	 * Jump if a is less than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_LT, /* a = MEFT, b = MEFT */
	INSTR_JMP_LT_MH, /* a = MEFT, b = H */
	INSTR_JMP_LT_HM, /* a = H, b = MEFT */
	INSTR_JMP_LT_HH, /* a = H, b = H */
	INSTR_JMP_LT_MI, /* a = MEFT, b = I */
	INSTR_JMP_LT_HI, /* a = H, b = I */

	/* jmpgt LABEL a b
	 * Jump if a is greater than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_GT, /* a = MEFT, b = MEFT */
	INSTR_JMP_GT_MH, /* a = MEFT, b = H */
	INSTR_JMP_GT_HM, /* a = H, b = MEFT */
	INSTR_JMP_GT_HH, /* a = H, b = H */
	INSTR_JMP_GT_MI, /* a = MEFT, b = I */
	INSTR_JMP_GT_HI, /* a = H, b = I */

	/* return
	 * Return from action
	 */
	INSTR_RETURN,

	/* Start of custom instructions. */
	INSTR_CUSTOM_0,
};
548 
/* Generic instruction operand: location (struct ID + byte offset) and width. */
struct instr_operand {
	uint8_t struct_id; /* Index into the per-thread structs[] array. */
	uint8_t n_bits;    /* Operand width in bits. */
	uint8_t offset;    /* Byte offset within the struct (used as array index by the handlers). */
	uint8_t pad;
};
555 
/* Operands of the I/O instructions (rx, tx, extract, emit). */
struct instr_io {
	struct {
		union {
			struct {
				uint8_t offset;
				uint8_t n_bits;
				uint8_t pad[2];
			};

			uint32_t val; /* Overlays the three fields above for single-load access. */
		};
	} io;

	struct {
		/* Up to 8 headers handled by a single (fused) instruction. */
		uint8_t header_id[8];
		uint8_t struct_id[8];
		uint8_t n_bytes[8];
	} hdr;
};
575 
/* Operand of the header validate/invalidate instructions. */
struct instr_hdr_validity {
	uint8_t header_id;
};

/* Operand of the table/selector/learner instructions. */
struct instr_table {
	uint8_t table_id;
};

/* Operands of the learn instruction. */
struct instr_learn {
	uint8_t action_id;
	uint8_t mf_offset; /* Offset of the first action argument (m.action_first_arg) -- confirm units. */
};

/* Operands of the extern object member function call. */
struct instr_extern_obj {
	uint8_t ext_obj_id;
	uint8_t func_id;
};

/* Operand of the extern function call. */
struct instr_extern_func {
	uint8_t ext_func_id;
};

/* Operands of the two-operand instructions (mov, ALU operations). */
struct instr_dst_src {
	struct instr_operand dst;
	union {
		struct instr_operand src; /* Field source variants. */
		uint64_t src_val;         /* Immediate (_I) variants. */
	};
};
605 
/* Operands of the register array instructions (regprefetch/regrd/regwr/regadd). */
struct instr_regarray {
	uint8_t regarray_id;
	uint8_t pad[3];

	union {
		struct instr_operand idx; /* Index taken from a struct field. */
		uint32_t idx_val;         /* Immediate index. */
	};

	union {
		struct instr_operand dstsrc; /* Destination (regrd) or source (regwr/regadd) field. */
		uint64_t dstsrc_val;         /* Immediate source value. */
	};
};

/* Operands of the meter instructions (metprefetch/meter). */
struct instr_meter {
	uint8_t metarray_id;
	uint8_t pad[3];

	union {
		struct instr_operand idx; /* Index taken from a struct field. */
		uint32_t idx_val;         /* Immediate index. */
	};

	struct instr_operand length; /* Packet length input. */

	union {
		struct instr_operand color_in; /* Input color taken from a struct field. */
		uint32_t color_in_val;         /* Immediate input color. */
	};

	struct instr_operand color_out; /* Output color destination. */
};
639 
/* Operands of the dma instruction: copy table action data into up to 8
 * headers with a single (fused) instruction.
 */
struct instr_dma {
	struct {
		uint8_t header_id[8];
		uint8_t struct_id[8];
	} dst;

	struct {
		uint8_t offset[8]; /* Source offsets within the table action data. */
	} src;

	uint16_t n_bytes[8]; /* Copy size per header. */
};

/* Operands of the jump instructions. */
struct instr_jmp {
	struct instruction *ip; /* Jump target instruction. */

	union {
		struct instr_operand a;
		uint8_t header_id; /* jmpv/jmpnv. */
		uint8_t action_id; /* jmpa/jmpna. */
	};

	union {
		struct instr_operand b;
		uint64_t b_val; /* Immediate comparand. */
	};
};
667 
/* One pipeline instruction: opcode plus the per-opcode operand layout. */
struct instruction {
	enum instruction_type type;
	union {
		struct instr_io io;
		struct instr_hdr_validity valid;
		struct instr_dst_src mov;
		struct instr_regarray regarray;
		struct instr_meter meter;
		struct instr_dma dma;
		struct instr_dst_src alu;
		struct instr_table table;
		struct instr_learn learn;
		struct instr_extern_obj ext_obj;
		struct instr_extern_func ext_func;
		struct instr_jmp jmp;
	};
};

/* Translation-time companion data for one instruction (labels, liveness). */
struct instruction_data {
	char label[RTE_SWX_NAME_SIZE];     /* Label attached to this instruction, if any. */
	char jmp_label[RTE_SWX_NAME_SIZE]; /* Label this instruction jumps to, if any. */
	uint32_t n_users; /* user = jmp instruction to this instruction. */
	int invalid;      /* Non-zero when this instruction slot is invalid -- confirm. */
};

/* Instruction handler prototype. */
typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
694 
/*
 * Action.
 */
/* Action implementation prototype. */
typedef void
(*action_func_t)(struct rte_swx_pipeline *p);

/* Action: a named instruction sequence plus its argument struct type. */
struct action {
	TAILQ_ENTRY(action) node;
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *st; /* Action argument struct type. */
	int *args_endianness; /* 0 = Host Byte Order (HBO); 1 = Network Byte Order (NBO). */
	struct instruction *instructions;
	struct instruction_data *instruction_data;
	uint32_t n_instructions;
	uint32_t id;
};

TAILQ_HEAD(action_tailq, action);
713 
/*
 * Table.
 */
/* Table type: match type plus the table driver operations. */
struct table_type {
	TAILQ_ENTRY(table_type) node;
	char name[RTE_SWX_NAME_SIZE];
	enum rte_swx_table_match_type match_type;
	struct rte_swx_table_ops ops;
};

TAILQ_HEAD(table_type_tailq, table_type);

/* One match field of a table. */
struct match_field {
	enum rte_swx_table_match_type match_type;
	struct field *field;
};

/* Table declaration. */
struct table {
	TAILQ_ENTRY(table) node;
	char name[RTE_SWX_NAME_SIZE];
	char args[RTE_SWX_NAME_SIZE];
	struct table_type *type; /* NULL when n_fields == 0. */

	/* Match. */
	struct match_field *fields;
	uint32_t n_fields;
	struct header *header; /* Only valid when n_fields > 0. */

	/* Action. */
	struct action **actions;              /* Actions allowed for this table. */
	struct action *default_action;
	uint8_t *default_action_data;
	uint32_t n_actions;
	int default_action_is_const;          /* Non-zero: default action cannot be changed. */
	uint32_t action_data_size_max;        /* Max action argument size over all actions. */
	int *action_is_for_table_entries;     /* Per-action: usable by regular entries. */
	int *action_is_for_default_entry;     /* Per-action: usable as default action. */

	uint32_t size; /* Table size (number of entries) -- confirm. */
	uint32_t id;
};

TAILQ_HEAD(table_tailq, table);
757 
758 struct table_runtime {
760  void *mailbox;
761  uint8_t **key;
762 };
763 
/* Per-table statistics counters. */
struct table_statistics {
	uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
	uint64_t *n_pkts_action; /* One counter per table action. */
};
768 
/*
 * Selector.
 */
/* Selector table: maps (group ID, selector fields) to a member ID. */
struct selector {
	TAILQ_ENTRY(selector) node;
	char name[RTE_SWX_NAME_SIZE];

	struct field *group_id_field;   /* Input: group ID. */
	struct field **selector_fields; /* Input: fields used to pick a group member. */
	uint32_t n_selector_fields;
	struct header *selector_header; /* Header owning the selector fields, if any. */
	struct field *member_id_field;  /* Output: selected member ID. */

	uint32_t n_groups_max;
	uint32_t n_members_per_group_max;

	uint32_t id;
};

TAILQ_HEAD(selector_tailq, selector);

/* Per-selector run-time data. */
struct selector_runtime {
	void *mailbox;
	uint8_t **group_id_buffer;
	uint8_t **selector_buffer;
	uint8_t **member_id_buffer;
};

/* Per-selector statistics counters. */
struct selector_statistics {
	uint64_t n_pkts;
};
800 
/*
 * Learner table.
 */
/* Learner table: entries can be added (learn) and deleted (forget) from the
 * data path.
 */
struct learner {
	TAILQ_ENTRY(learner) node;
	char name[RTE_SWX_NAME_SIZE];

	/* Match. */
	struct field **fields;
	uint32_t n_fields;
	struct header *header; /* Header owning the match fields, if any. */

	/* Action. */
	struct action **actions;          /* Actions allowed for this learner. */
	struct action *default_action;
	uint8_t *default_action_data;
	uint32_t n_actions;
	int default_action_is_const;      /* Non-zero: default action cannot be changed. */
	uint32_t action_data_size_max;    /* Max action argument size over all actions. */
	int *action_is_for_table_entries; /* Per-action: usable by regular entries. */
	int *action_is_for_default_entry; /* Per-action: usable as default action. */

	uint32_t size;
	uint32_t timeout; /* Entry expiration timeout (presumably in seconds -- confirm). */
	uint32_t id;
};

TAILQ_HEAD(learner_tailq, learner);

/* Per-learner run-time data. */
struct learner_runtime {
	void *mailbox;
	uint8_t **key; /* Lookup key assembly buffer. */
};

/* Per-learner statistics counters. */
struct learner_statistics {
	uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
	uint64_t n_pkts_learn[2]; /* 0 = Learn OK, 1 = Learn error. */
	uint64_t n_pkts_forget;
	uint64_t *n_pkts_action; /* One counter per learner action. */
};
841 
/*
 * Register array.
 */
/* Register array declaration. */
struct regarray {
	TAILQ_ENTRY(regarray) node;
	char name[RTE_SWX_NAME_SIZE];
	uint64_t init_val; /* Initial value of every register. */
	uint32_t size;     /* Number of registers. */
	uint32_t id;
};

TAILQ_HEAD(regarray_tailq, regarray);

/* Per-array run-time data. */
struct regarray_runtime {
	uint64_t *regarray;
	uint32_t size_mask; /* Used to wrap indices; implies size is a power of two -- confirm. */
};
859 
/*
 * Meter array.
 */
/* trTCM meter profile, shareable by multiple meters. */
struct meter_profile {
	TAILQ_ENTRY(meter_profile) node;
	char name[RTE_SWX_NAME_SIZE];
	struct rte_meter_trtcm_params params;
	struct rte_meter_trtcm_profile profile;
	uint32_t n_users; /* Number of meters currently using this profile. */
};

TAILQ_HEAD(meter_profile_tailq, meter_profile);

/* Meter array declaration. */
struct metarray {
	TAILQ_ENTRY(metarray) node;
	char name[RTE_SWX_NAME_SIZE];
	uint32_t size; /* Number of meters. */
	uint32_t id;
};

TAILQ_HEAD(metarray_tailq, metarray);

/* One meter instance plus its statistics. The pad[20] is presumably there
 * to shape the struct size for cache-friendly array layout -- confirm.
 */
struct meter {
	struct rte_meter_trtcm m;
	struct meter_profile *profile;
	enum rte_color color_mask;
	uint8_t pad[20];

	uint64_t n_pkts[RTE_COLORS];  /* Packet counters, one per color. */
	uint64_t n_bytes[RTE_COLORS]; /* Byte counters, one per color. */
};

/* Per-array run-time data. */
struct metarray_runtime {
	struct meter *metarray;
	uint32_t size_mask; /* Used to wrap indices; implies size is a power of two -- confirm. */
};
896 
/*
 * Pipeline.
 */
/* Per-thread pipeline run-time state (one packet in flight per thread). */
struct thread {
	/* Packet. */
	struct rte_swx_pkt pkt;
	uint8_t *ptr; /* Current position within the packet -- confirm. */

	/* Structures. */
	uint8_t **structs; /* All structs (headers, meta-data, mailboxes) indexed by struct ID. */

	/* Packet headers. */
	struct header_runtime *headers; /* Extracted or generated headers. */
	struct header_out_runtime *headers_out; /* Emitted headers. */
	uint8_t *header_storage;
	uint8_t *header_out_storage;
	uint64_t valid_headers; /* Bit mask: bit i set when header i is valid (see HEADER_VALID). */
	uint32_t n_headers_out;

	/* Packet meta-data. */
	uint8_t *metadata;

	/* Tables. */
	struct table_runtime *tables;
	struct selector_runtime *selectors;
	struct learner_runtime *learners;
	struct rte_swx_table_state *table_state;
	uint64_t action_id; /* Action selected by the latest table lookup. */
	int hit; /* 0 = Miss, 1 = Hit. */
	uint32_t learner_id;
	uint64_t time;

	/* Extern objects and functions. */
	struct extern_obj_runtime *extern_objs;
	struct extern_func_runtime *extern_funcs;

	/* Instructions. */
	struct instruction *ip;  /* Next instruction to execute. */
	struct instruction *ret; /* Return target for action execution -- confirm. */
};

/* Single-bit helpers on a 64-bit mask. */
#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))

/* Non-zero when the given header is currently valid for this thread. */
#define HEADER_VALID(thread, header_id) \
	MASK64_BIT_GET((thread)->valid_headers, header_id)
944 
945 static inline uint64_t
946 instr_operand_hbo(struct thread *t, const struct instr_operand *x)
947 {
948  uint8_t *x_struct = t->structs[x->struct_id];
949  uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
950  uint64_t x64 = *x64_ptr;
951  uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
952 
953  return x64 & x64_mask;
954 }
955 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read operand x of thread t that is stored in Network Byte Order (big
 * endian, e.g. a header field): convert to host order and right-align to
 * the operand's n_bits width.
 */
static inline uint64_t
instr_operand_nbo(struct thread *t, const struct instr_operand *x)
{
	uint8_t *x_struct = t->structs[x->struct_id];
	uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
	uint64_t x64 = *x64_ptr;

	return ntoh64(x64) >> (64 - x->n_bits);
}

#else

/* On big endian hosts, NBO and HBO coincide. */
#define instr_operand_nbo instr_operand_hbo

#endif
973 
/* Generic ALU operation: dst = dst <operator> src, both operands in HBO.
 * Reads/writes a full 64-bit word at dst and masks to the operand widths,
 * so only the dst field's bits are modified.
 */
#define ALU(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}
992 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* dst = MEF (HBO), src = H (NBO): convert src to host order before the op. */
#define ALU_MH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

/* dst = H (NBO), src = MEF (HBO): operate in host order, store back in NBO. */
#define ALU_HM(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

/* ALU_HM variant that skips the byte-order round trip by pre-aligning src;
 * presumably only valid for operators where this is equivalent (e.g. the
 * bitwise ops) -- confirm against the call sites.
 */
#define ALU_HM_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

/* dst = H (NBO), src = H (NBO): operate in host order, store back in NBO. */
#define ALU_HH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
	\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

/* ALU_HH variant that stays in NBO via shifts only; presumably only valid
 * for operators where this is equivalent (e.g. the bitwise ops) -- confirm
 * against the call sites.
 */
#define ALU_HH_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#else

/* On big endian hosts, headers are already in host byte order, so all
 * variants reduce to the generic ALU.
 */
#define ALU_MH ALU
#define ALU_HM ALU
#define ALU_HM_FAST ALU
#define ALU_HH ALU
#define ALU_HH_FAST ALU

#endif
1098 
/* ALU operation with immediate source: dst = dst <operator> imm, dst in HBO. */
#define ALU_I(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint64_t src = (ip)->alu.src_val; \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

/* Meta-data destination with immediate source is plain HBO. */
#define ALU_MI ALU_I
1115 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* dst = H (NBO), src = immediate: operate in host order, store back in NBO. */
#define ALU_HI(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint64_t src = (ip)->alu.src_val; \
	\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#else

/* On big endian hosts, header fields are already in host byte order. */
#define ALU_HI ALU_I

#endif
1139 
/* mov dst src, both operands in HBO: copy src into the dst field while
 * preserving the other bits of the 64-bit word around it.
 */
#define MOV(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}
1155 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* mov dst = MEF (HBO), src = H (NBO): convert src to host order first. */
#define MOV_MH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}

/* mov dst = H (NBO), src = MEF (HBO): convert src to NBO for storage. */
#define MOV_HM(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	\
	src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}

/* mov dst = H, src = H (both NBO): re-align via shifts, no byte swap needed. */
#define MOV_HH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	\
	uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
	src = src >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}

#else

/* On big endian hosts, all variants reduce to the generic MOV. */
#define MOV_MH MOV
#define MOV_HM MOV
#define MOV_HH MOV

#endif
1213 
/*
 * MOV_I: dst = immediate. The immediate is pre-converted at instruction
 * translation time, so no byte swap is done here; only the lowest
 * dst.n_bits bits of the destination word are written.
 */
#define MOV_I(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint64_t src = (ip)->mov.src_val; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}
1225 
/*
 * JMP_CMP: conditional jump. Read fields a and b (host order, masked to
 * their bit widths), evaluate "a operator b", and set the thread IP to the
 * jump target when true or to the next instruction when false (branchless
 * select via the ternary).
 */
#define JMP_CMP(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}
1242 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/*
 * JMP_CMP variants for little-endian hosts: H-side operands are big-endian
 * header fields that get byte-swapped (ntoh64) and right-aligned before the
 * comparison; M-side operands are host order and only masked.
 */

#define JMP_CMP_MH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HM(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

/*
 * JMP_CMP_HH_FAST: both operands big-endian; skip the byte swaps and compare
 * the two values left-aligned instead of right-aligned. NOTE(review): this
 * compares left-aligned raw big-endian words, so it is presumably only
 * selected for operators/widths where that ordering is equivalent (e.g.
 * equality) — confirm against the instruction translator.
 */
#define JMP_CMP_HH_FAST(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

/* Big-endian host: header fields are already in host order. */
#define JMP_CMP_MH JMP_CMP
#define JMP_CMP_HM JMP_CMP
#define JMP_CMP_HH JMP_CMP
#define JMP_CMP_HH_FAST JMP_CMP

#endif
1315 
/*
 * JMP_CMP_I: conditional jump comparing field a (host order, masked) against
 * the immediate b_val; the immediate is pre-converted at translation time.
 */
#define JMP_CMP_I(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
	\
	uint64_t b = (ip)->jmp.b_val; \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

/* Meta-data operand vs immediate: identical to the generic case. */
#define JMP_CMP_MI JMP_CMP_I
1330 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/*
 * JMP_CMP_HI: operand a is a big-endian header field — byte-swap and
 * right-align it before comparing against the immediate.
 */
#define JMP_CMP_HI(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	\
	uint64_t b = (ip)->jmp.b_val; \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

/* Big-endian host: header fields are already in host order. */
#define JMP_CMP_HI JMP_CMP_I

#endif
1350 
/*
 * METADATA_READ: read an n_bits-wide field (n_bits must be 1..64, or the
 * mask shift is undefined) starting at the given byte offset within the
 * thread meta-data, returned right-aligned in a uint64_t. Implemented as a
 * GCC statement expression.
 * NOTE(review): the load goes through a uint64_t pointer, so the meta-data
 * offsets are presumably laid out 8-byte friendly — confirm with the
 * pipeline builder.
 * Fix: parenthesize the "offset" macro parameter so expression arguments
 * expand safely.
 */
#define METADATA_READ(thread, offset, n_bits) \
({ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[(offset)]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
	(m64 & m64_mask); \
})
1358 
/*
 * METADATA_WRITE: write the lowest n_bits bits of "value" (n_bits must be
 * 1..64) into the meta-data field at the given byte offset; the other bits
 * of the underlying 64-bit word are preserved.
 * Fix: parenthesize the "offset" and "value" macro parameters so expression
 * arguments expand safely.
 */
#define METADATA_WRITE(thread, offset, n_bits, value) \
{ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[(offset)]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
	\
	uint64_t m_new = (value); \
	\
	*m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
}
1369 
/* Number of packet processing threads per pipeline. Must be a power of two:
 * thread_yield()/thread_yield_cond() wrap the thread ID with a bit mask.
 */
#ifndef RTE_SWX_PIPELINE_THREADS_MAX
#define RTE_SWX_PIPELINE_THREADS_MAX 16
#endif

/* Maximum number of entries in the pipeline instruction dispatch table. */
#ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX
#define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 256
#endif
1377 
/*
 * Pipeline instance: the configuration-time object lists registered before
 * the build, plus the run-time arrays derived from them.
 */
struct rte_swx_pipeline {
	/* Configuration-time object lists. */
	struct struct_type_tailq struct_types;
	struct port_in_type_tailq port_in_types;
	struct port_in_tailq ports_in;
	struct port_out_type_tailq port_out_types;
	struct port_out_tailq ports_out;
	struct extern_type_tailq extern_types;
	struct extern_obj_tailq extern_objs;
	struct extern_func_tailq extern_funcs;
	struct header_tailq headers;
	struct struct_type *metadata_st;
	uint32_t metadata_struct_id;
	struct action_tailq actions;
	struct table_type_tailq table_types;
	struct table_tailq tables;
	struct selector_tailq selectors;
	struct learner_tailq learners;
	struct regarray_tailq regarrays;
	struct meter_profile_tailq meter_profiles;
	struct metarray_tailq metarrays;

	/* Run-time state (arrays indexed by object ID). */
	struct port_in_runtime *in;
	struct port_out_runtime *out;
	struct instruction **action_instructions;
	action_func_t *action_funcs;
	struct rte_swx_table_state *table_state;
	struct table_statistics *table_stats;
	struct selector_statistics *selector_stats;
	struct learner_statistics *learner_stats;
	struct regarray_runtime *regarray_runtime;
	struct metarray_runtime *metarray_runtime;
	struct instruction *instructions;
	struct instruction_data *instruction_data;
	instr_exec_t *instruction_table;
	struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
	void *lib; /* Handle for the pipeline shared object, if any. */

	/* Object counts and scheduling cursors. */
	uint32_t n_structs;
	uint32_t n_ports_in;
	uint32_t n_ports_out;
	uint32_t n_extern_objs;
	uint32_t n_extern_funcs;
	uint32_t n_actions;
	uint32_t n_tables;
	uint32_t n_selectors;
	uint32_t n_learners;
	uint32_t n_regarrays;
	uint32_t n_metarrays;
	uint32_t n_headers;
	uint32_t thread_id; /* Currently scheduled thread. */
	uint32_t port_id;   /* Next input port to poll. */
	uint32_t n_instructions;
	int build_done;
	int numa_node;
};
1433 
1434 /*
1435  * Instruction.
1436  */
/*
 * Advance to the next input port, round-robin. Wrap-around uses a bit mask,
 * which is only correct when n_ports_in is a power of two.
 */
static inline void
pipeline_port_inc(struct rte_swx_pipeline *p)
{
	p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
}
1442 
/* Reset the thread instruction pointer to the first pipeline instruction. */
static inline void
thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
{
	t->ip = p->instructions;
}

/* Point the thread instruction pointer at an arbitrary instruction. */
static inline void
thread_ip_set(struct thread *t, struct instruction *ip)
{
	t->ip = ip;
}

/*
 * Call into an action: save the return address (the instruction after the
 * current one) and jump to the first instruction of the given action.
 */
static inline void
thread_ip_action_call(struct rte_swx_pipeline *p,
		      struct thread *t,
		      uint32_t action_id)
{
	t->ret = t->ip + 1;
	t->ip = p->action_instructions[action_id];
}
1463 
1464 static inline void
1465 thread_ip_inc(struct rte_swx_pipeline *p);
1466 
1467 static inline void
1468 thread_ip_inc(struct rte_swx_pipeline *p)
1469 {
1470  struct thread *t = &p->threads[p->thread_id];
1471 
1472  t->ip++;
1473 }
1474 
/*
 * Branchless conditional IP advance: cond is expected to be 0 or 1, so the
 * IP moves forward only when cond is 1.
 */
static inline void
thread_ip_inc_cond(struct thread *t, int cond)
{
	t->ip += cond;
}

/*
 * Hand over to the next thread, round-robin. Mask-based wrap-around requires
 * RTE_SWX_PIPELINE_THREADS_MAX to be a power of two.
 */
static inline void
thread_yield(struct rte_swx_pipeline *p)
{
	p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
}

/* Branchless conditional yield: cond is expected to be 0 or 1. */
static inline void
thread_yield_cond(struct rte_swx_pipeline *p, int cond)
{
	p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
}
1492 
1493 /*
1494  * rx.
1495  */
/*
 * Core of the rx instruction: poll the current input port for one packet and
 * initialize the per-packet thread state (header validity, meta-data port
 * field, table state snapshot). Returns non-zero when a packet was received.
 * Note: the state initialization below runs regardless of whether a packet
 * was actually received; the caller uses the return value to decide whether
 * to advance past the rx instruction.
 */
static inline int
__instr_rx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct port_in_runtime *port = &p->in[p->port_id];
	struct rte_swx_pkt *pkt = &t->pkt;
	int pkt_received;

	/* Packet. */
	pkt_received = port->pkt_rx(port->obj, pkt);
	t->ptr = &pkt->pkt[pkt->offset];
	rte_prefetch0(t->ptr); /* Warm the cache line the parser reads first. */

	TRACE("[Thread %2u] rx %s from port %u\n",
	      p->thread_id,
	      pkt_received ? "1 pkt" : "0 pkts",
	      p->port_id);

	/* Headers: nothing parsed or emitted yet. */
	t->valid_headers = 0;
	t->n_headers_out = 0;

	/* Meta-data: record the input port ID. */
	METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);

	/* Tables: snapshot the current table state pointer. */
	t->table_state = p->table_state;

	/* Thread: move on to the next input port for the next rx. */
	pipeline_port_inc(p);

	return pkt_received;
}
1528 
/*
 * rx instruction dispatcher: run the rx core, then advance the IP only when
 * a packet was received (otherwise the thread retries rx) and yield to the
 * next thread either way.
 */
static inline void
instr_rx_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	int pkt_received;

	/* Packet. */
	pkt_received = __instr_rx_exec(p, t, ip);

	/* Thread. */
	thread_ip_inc_cond(t, pkt_received);
	thread_yield(p);
}
1543 
1544 /*
1545  * tx.
1546  */
1547 static inline void
1548 emit_handler(struct thread *t)
1549 {
1550  struct header_out_runtime *h0 = &t->headers_out[0];
1551  struct header_out_runtime *h1 = &t->headers_out[1];
1552  uint32_t offset = 0, i;
1553 
1554  /* No header change or header decapsulation. */
1555  if ((t->n_headers_out == 1) &&
1556  (h0->ptr + h0->n_bytes == t->ptr)) {
1557  TRACE("Emit handler: no header change or header decap.\n");
1558 
1559  t->pkt.offset -= h0->n_bytes;
1560  t->pkt.length += h0->n_bytes;
1561 
1562  return;
1563  }
1564 
1565  /* Header encapsulation (optionally, with prior header decapsulation). */
1566  if ((t->n_headers_out == 2) &&
1567  (h1->ptr + h1->n_bytes == t->ptr) &&
1568  (h0->ptr == h0->ptr0)) {
1569  uint32_t offset;
1570 
1571  TRACE("Emit handler: header encapsulation.\n");
1572 
1573  offset = h0->n_bytes + h1->n_bytes;
1574  memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1575  t->pkt.offset -= offset;
1576  t->pkt.length += offset;
1577 
1578  return;
1579  }
1580 
1581  /* For any other case. */
1582  TRACE("Emit handler: complex case.\n");
1583 
1584  for (i = 0; i < t->n_headers_out; i++) {
1585  struct header_out_runtime *h = &t->headers_out[i];
1586 
1587  memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1588  offset += h->n_bytes;
1589  }
1590 
1591  if (offset) {
1592  memcpy(t->ptr - offset, t->header_out_storage, offset);
1593  t->pkt.offset -= offset;
1594  t->pkt.length += offset;
1595  }
1596 }
1597 
/*
 * Core of the tx instruction: read the output port ID from the meta-data
 * field named by the instruction, fold the emitted headers into the packet,
 * then hand the packet to the output port.
 */
static inline void
__instr_tx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
	struct port_out_runtime *port = &p->out[port_id];
	struct rte_swx_pkt *pkt = &t->pkt;

	TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
	      p->thread_id,
	      (uint32_t)port_id);

	/* Headers. */
	emit_handler(t);

	/* Packet. */
	port->pkt_tx(port->obj, pkt);
}
1615 
/*
 * tx with immediate port ID: same as __instr_tx_exec(), but the output port
 * is encoded in the instruction instead of read from meta-data.
 */
static inline void
__instr_tx_i_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t port_id = ip->io.io.val;
	struct port_out_runtime *port = &p->out[port_id];
	struct rte_swx_pkt *pkt = &t->pkt;

	TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
	      p->thread_id,
	      (uint32_t)port_id);

	/* Headers. */
	emit_handler(t);

	/* Packet. */
	port->pkt_tx(port->obj, pkt);
}
1633 
1634 /*
1635  * extract.
1636  */
/*
 * Extract n_extract consecutive fixed-size headers from the packet: for each
 * one, point its struct at the current packet position, mark it valid, and
 * advance the packet cursor by the header size. Thread state is updated once
 * at the end. No bounds check against the remaining packet length is done
 * here.
 */
static inline void
__instr_hdr_extract_many_exec(struct rte_swx_pipeline *p __rte_unused,
			      struct thread *t,
			      const struct instruction *ip,
			      uint32_t n_extract)
{
	uint64_t valid_headers = t->valid_headers;
	uint8_t *ptr = t->ptr;
	uint32_t offset = t->pkt.offset;
	uint32_t length = t->pkt.length;
	uint32_t i;

	for (i = 0; i < n_extract; i++) {
		uint32_t header_id = ip->io.hdr.header_id[i];
		uint32_t struct_id = ip->io.hdr.struct_id[i];
		uint32_t n_bytes = ip->io.hdr.n_bytes[i];

		TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
		      p->thread_id,
		      header_id,
		      n_bytes);

		/* Headers: map the header struct onto the packet bytes. */
		t->structs[struct_id] = ptr;
		valid_headers = MASK64_BIT_SET(valid_headers, header_id);

		/* Packet: consume the header bytes. */
		offset += n_bytes;
		length -= n_bytes;
		ptr += n_bytes;
	}

	/* Headers. */
	t->valid_headers = valid_headers;

	/* Packet. */
	t->pkt.offset = offset;
	t->pkt.length = length;
	t->ptr = ptr;
}
1677 
/*
 * Fused header extract instructions: extract 1..8 consecutive headers with a
 * single call into __instr_hdr_extract_many_exec(). The N-header variants
 * replace N consecutive extract instructions fused at translation time.
 */
static inline void
__instr_hdr_extract_exec(struct rte_swx_pipeline *p,
			 struct thread *t,
			 const struct instruction *ip)
{
	__instr_hdr_extract_many_exec(p, t, ip, 1);
}

static inline void
__instr_hdr_extract2_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 2);
}

static inline void
__instr_hdr_extract3_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 3);
}

static inline void
__instr_hdr_extract4_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 4);
}

static inline void
__instr_hdr_extract5_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 5);
}

static inline void
__instr_hdr_extract6_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 6);
}

static inline void
__instr_hdr_extract7_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 7);
}

static inline void
__instr_hdr_extract8_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 8);
}
1755 
/*
 * Extract a variable-size header: the total size is the instruction's fixed
 * part (n_bytes) plus a trailing part whose length is read from a meta-data
 * field at run time. The run-time total is stored in the header's
 * header_runtime so later emit knows how much to copy.
 */
static inline void
__instr_hdr_extract_m_exec(struct rte_swx_pipeline *p __rte_unused,
			   struct thread *t,
			   const struct instruction *ip)
{
	uint64_t valid_headers = t->valid_headers;
	uint8_t *ptr = t->ptr;
	uint32_t offset = t->pkt.offset;
	uint32_t length = t->pkt.length;

	/* Variable part of the header size, taken from meta-data. */
	uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
	uint32_t header_id = ip->io.hdr.header_id[0];
	uint32_t struct_id = ip->io.hdr.struct_id[0];
	uint32_t n_bytes = ip->io.hdr.n_bytes[0];

	struct header_runtime *h = &t->headers[header_id];

	TRACE("[Thread %2u]: extract header %u (%u + %u bytes)\n",
	      p->thread_id,
	      header_id,
	      n_bytes,
	      n_bytes_last);

	n_bytes += n_bytes_last;

	/* Headers. */
	t->structs[struct_id] = ptr;
	t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
	h->n_bytes = n_bytes;

	/* Packet. */
	t->pkt.offset = offset + n_bytes;
	t->pkt.length = length - n_bytes;
	t->ptr = ptr + n_bytes;
}
1791 
/*
 * Lookahead: map a header struct onto the current packet position and mark
 * it valid WITHOUT consuming any packet bytes (the packet cursor does not
 * move, unlike extract).
 */
static inline void
__instr_hdr_lookahead_exec(struct rte_swx_pipeline *p __rte_unused,
			   struct thread *t,
			   const struct instruction *ip)
{
	uint64_t valid_headers = t->valid_headers;
	uint8_t *ptr = t->ptr;

	uint32_t header_id = ip->io.hdr.header_id[0];
	uint32_t struct_id = ip->io.hdr.struct_id[0];

	TRACE("[Thread %2u]: lookahead header %u\n",
	      p->thread_id,
	      header_id);

	/* Headers. */
	t->structs[struct_id] = ptr;
	t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
}
1811 
1812 /*
1813  * emit.
1814  */
/*
 * Queue n_emit headers for emission into t->headers_out, skipping headers
 * that are currently invalid. Headers whose bytes are contiguous in memory
 * are coalesced into a single headers_out entry so emit_handler() can copy
 * them with one memcpy. The header is marked valid after emit.
 */
static inline void
__instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
			   struct thread *t,
			   const struct instruction *ip,
			   uint32_t n_emit)
{
	uint64_t valid_headers = t->valid_headers;
	uint32_t n_headers_out = t->n_headers_out;
	struct header_out_runtime *ho = NULL; /* Current output entry being filled. */
	uint8_t *ho_ptr = NULL;
	uint32_t ho_nbytes = 0, i;

	for (i = 0; i < n_emit; i++) {
		uint32_t header_id = ip->io.hdr.header_id[i];
		uint32_t struct_id = ip->io.hdr.struct_id[i];

		struct header_runtime *hi = &t->headers[header_id];
		uint8_t *hi_ptr0 = hi->ptr0;
		uint32_t n_bytes = hi->n_bytes;

		uint8_t *hi_ptr = t->structs[struct_id];

		/* Invalid headers are silently skipped, not emitted. */
		if (!MASK64_BIT_GET(valid_headers, header_id)) {
			TRACE("[Thread %2u]: emit header %u (invalid)\n",
			      p->thread_id,
			      header_id);

			continue;
		}

		TRACE("[Thread %2u]: emit header %u (valid)\n",
		      p->thread_id,
		      header_id);

		/* Headers. */
		if (!ho) {
			if (!n_headers_out) {
				/* First emitted header: open entry 0. */
				ho = &t->headers_out[0];

				ho->ptr0 = hi_ptr0;
				ho->ptr = hi_ptr;

				ho_ptr = hi_ptr;
				ho_nbytes = n_bytes;

				n_headers_out = 1;

				continue;
			} else {
				/* Resume filling the last open entry. */
				ho = &t->headers_out[n_headers_out - 1];

				ho_ptr = ho->ptr;
				ho_nbytes = ho->n_bytes;
			}
		}

		/* Coalesce when this header directly follows the previous
		 * one in memory; otherwise close the entry and open a new
		 * one.
		 */
		if (ho_ptr + ho_nbytes == hi_ptr) {
			ho_nbytes += n_bytes;
		} else {
			ho->n_bytes = ho_nbytes;

			ho++;
			ho->ptr0 = hi_ptr0;
			ho->ptr = hi_ptr;

			ho_ptr = hi_ptr;
			ho_nbytes = n_bytes;

			n_headers_out++;
		}
	}

	/* Flush the byte count of the last open entry. */
	if (ho)
		ho->n_bytes = ho_nbytes;
	t->n_headers_out = n_headers_out;
}
1891 
/*
 * Fused emit instructions: the emitN_tx variants replace N consecutive emit
 * instructions followed by a tx instruction, fused at translation time.
 */
static inline void
__instr_hdr_emit_exec(struct rte_swx_pipeline *p,
		      struct thread *t,
		      const struct instruction *ip)
{
	__instr_hdr_emit_many_exec(p, t, ip, 1);
}

static inline void
__instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p,
			 struct thread *t,
			 const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 1);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 2);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 3);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 4);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 5);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 6);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 7);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 8);
	__instr_tx_exec(p, t, ip);
}
1987 
1988 /*
1989  * validate.
1990  */
/* validate instruction: mark the given header as valid (sets its bit in the
 * thread's valid_headers mask).
 */
static inline void
__instr_hdr_validate_exec(struct rte_swx_pipeline *p __rte_unused,
			  struct thread *t,
			  const struct instruction *ip)
{
	uint32_t header_id = ip->valid.header_id;

	TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);

	/* Headers. */
	t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
}
2003 
2004 /*
2005  * invalidate.
2006  */
/* invalidate instruction: mark the given header as invalid (clears its bit
 * in the thread's valid_headers mask).
 */
static inline void
__instr_hdr_invalidate_exec(struct rte_swx_pipeline *p __rte_unused,
			    struct thread *t,
			    const struct instruction *ip)
{
	uint32_t header_id = ip->valid.header_id;

	TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);

	/* Headers. */
	t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
}
2019 
2020 /*
2021  * learn.
2022  */
/*
 * learn instruction: add an entry (key taken from meta-data at mf_offset,
 * with the given action) to the learner table the current thread last looked
 * up. The learner's table state lives after the regular tables and selectors
 * in the table_state array, hence the n_tables + n_selectors index offset.
 * Success/failure is only recorded in the learner statistics.
 */
static inline void
__instr_learn_exec(struct rte_swx_pipeline *p,
		   struct thread *t,
		   const struct instruction *ip)
{
	uint64_t action_id = ip->learn.action_id;
	uint32_t mf_offset = ip->learn.mf_offset;
	uint32_t learner_id = t->learner_id;
	struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
		p->n_selectors + learner_id];
	struct learner_runtime *l = &t->learners[learner_id];
	struct learner_statistics *stats = &p->learner_stats[learner_id];
	uint32_t status;

	/* Table. */
	status = rte_swx_table_learner_add(ts->obj,
					   l->mailbox,
					   t->time,
					   action_id,
					   &t->metadata[mf_offset]);

	TRACE("[Thread %2u] learner %u learn %s\n",
	      p->thread_id,
	      learner_id,
	      status ? "ok" : "error");

	stats->n_pkts_learn[status] += 1;
}
2051 
2052 /*
2053  * forget.
2054  */
/*
 * forget instruction: delete the current entry from the learner table the
 * current thread last looked up (entry identity comes from the learner
 * mailbox). Counted in the learner statistics.
 */
static inline void
__instr_forget_exec(struct rte_swx_pipeline *p,
		    struct thread *t,
		    const struct instruction *ip __rte_unused)
{
	uint32_t learner_id = t->learner_id;
	struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
		p->n_selectors + learner_id];
	struct learner_runtime *l = &t->learners[learner_id];
	struct learner_statistics *stats = &p->learner_stats[learner_id];

	/* Table. */
	rte_swx_table_learner_delete(ts->obj, l->mailbox);

	TRACE("[Thread %2u] learner %u forget\n",
	      p->thread_id,
	      learner_id);

	stats->n_pkts_forget += 1;
}
2075 
2076 /*
2077  * extern.
2078  */
/*
 * extern object member function call: invoke the selected member function of
 * the selected extern object instance, passing its mailbox. Returns the
 * function's completion status (non-zero when done, per the extern ABI).
 */
static inline uint32_t
__instr_extern_obj_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	uint32_t obj_id = ip->ext_obj.ext_obj_id;
	uint32_t func_id = ip->ext_obj.func_id;
	struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
	rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
	uint32_t done;

	TRACE("[Thread %2u] extern obj %u member func %u\n",
	      p->thread_id,
	      obj_id,
	      func_id);

	done = func(obj->obj, obj->mailbox);

	return done;
}
2099 
/*
 * extern function call: invoke the selected free-standing extern function,
 * passing its mailbox. Returns the function's completion status.
 */
static inline uint32_t
__instr_extern_func_exec(struct rte_swx_pipeline *p __rte_unused,
			 struct thread *t,
			 const struct instruction *ip)
{
	uint32_t ext_func_id = ip->ext_func.ext_func_id;
	struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
	rte_swx_extern_func_t func = ext_func->func;
	uint32_t done;

	TRACE("[Thread %2u] extern func %u\n",
	      p->thread_id,
	      ext_func_id);

	done = func(ext_func->mailbox);

	return done;
}
2118 
2119 /*
2120  * mov.
2121  */
/*
 * mov instruction executors: thin wrappers that trace and dispatch to the
 * MOV* macros; the suffix selects the operand byte-order combination
 * (mh/hm/hh) or immediate source (i).
 */
static inline void
__instr_mov_exec(struct rte_swx_pipeline *p __rte_unused,
		 struct thread *t,
		 const struct instruction *ip)
{
	TRACE("[Thread %2u] mov\n", p->thread_id);

	MOV(t, ip);
}

static inline void
__instr_mov_mh_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] mov (mh)\n", p->thread_id);

	MOV_MH(t, ip);
}

static inline void
__instr_mov_hm_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] mov (hm)\n", p->thread_id);

	MOV_HM(t, ip);
}

static inline void
__instr_mov_hh_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] mov (hh)\n", p->thread_id);

	MOV_HH(t, ip);
}

static inline void
__instr_mov_i_exec(struct rte_swx_pipeline *p __rte_unused,
		   struct thread *t,
		   const struct instruction *ip)
{
	TRACE("[Thread %2u] mov m.f %" PRIx64 "\n", p->thread_id, ip->mov.src_val);

	MOV_I(t, ip);
}
2171 
2172 /*
2173  * dma.
2174  */
/*
 * dma instruction core: copy n_dma blocks of action data (t->structs[0]) into
 * header fields. When the destination header is currently invalid, the copy
 * goes to its backing storage (ptr0) rather than the packet-mapped pointer;
 * either way the header struct is repointed at the written bytes and the
 * header is marked valid.
 */
static inline void
__instr_dma_ht_many_exec(struct rte_swx_pipeline *p __rte_unused,
			 struct thread *t,
			 const struct instruction *ip,
			 uint32_t n_dma)
{
	uint8_t *action_data = t->structs[0];
	uint64_t valid_headers = t->valid_headers;
	uint32_t i;

	for (i = 0; i < n_dma; i++) {
		uint32_t header_id = ip->dma.dst.header_id[i];
		uint32_t struct_id = ip->dma.dst.struct_id[i];
		uint32_t offset = ip->dma.src.offset[i];
		uint32_t n_bytes = ip->dma.n_bytes[i];

		struct header_runtime *h = &t->headers[header_id];
		uint8_t *h_ptr0 = h->ptr0;
		uint8_t *h_ptr = t->structs[struct_id];

		/* Valid header: write in place; invalid: write to ptr0. */
		void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
			h_ptr : h_ptr0;
		void *src = &action_data[offset];

		TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);

		/* Headers. */
		memcpy(dst, src, n_bytes);
		t->structs[struct_id] = dst;
		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
	}

	t->valid_headers = valid_headers;
}
2209 
2210 static inline void
2211 __instr_dma_ht_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2212 {
2213  __instr_dma_ht_many_exec(p, t, ip, 1);
2214 }
2215 
2216 static inline void
2217 __instr_dma_ht2_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2218 {
2219  TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2220 
2221  __instr_dma_ht_many_exec(p, t, ip, 2);
2222 }
2223 
2224 static inline void
2225 __instr_dma_ht3_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2226 {
2227  TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2228 
2229  __instr_dma_ht_many_exec(p, t, ip, 3);
2230 }
2231 
2232 static inline void
2233 __instr_dma_ht4_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2234 {
2235  TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2236 
2237  __instr_dma_ht_many_exec(p, t, ip, 4);
2238 }
2239 
2240 static inline void
2241 __instr_dma_ht5_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2242 {
2243  TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2244 
2245  __instr_dma_ht_many_exec(p, t, ip, 5);
2246 }
2247 
/* Fused variant: execute 6 dma h.s t.f instructions as one. */
static inline void
__instr_dma_ht6_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 6);
}
2255 
/* Fused variant: execute 7 dma h.s t.f instructions as one. */
static inline void
__instr_dma_ht7_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 7);
}
2263 
/* Fused variant: execute 8 dma h.s t.f instructions as one. */
static inline void
__instr_dma_ht8_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 8);
}
2271 
2272 /*
2273  * alu.
2274  */
/* add: dst += src; operand access generated by the ALU macro. The pipeline
 * pointer p is only referenced by TRACE, hence __rte_unused.
 */
static inline void
__instr_alu_add_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] add\n", p->thread_id);

	ALU(t, ip, +);
}
2284 
/* add (mh): dst += src, operand-kind variant handled by the ALU_MH macro. */
static inline void
__instr_alu_add_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (mh)\n", p->thread_id);

	ALU_MH(t, ip, +);
}
2294 
/* add (hm): dst += src, operand-kind variant handled by the ALU_HM macro. */
static inline void
__instr_alu_add_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (hm)\n", p->thread_id);

	ALU_HM(t, ip, +);
}
2304 
/* add (hh): dst += src, operand-kind variant handled by the ALU_HH macro. */
static inline void
__instr_alu_add_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (hh)\n", p->thread_id);

	ALU_HH(t, ip, +);
}
2314 
/* add (mi): dst += immediate, variant handled by the ALU_MI macro. */
static inline void
__instr_alu_add_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (mi)\n", p->thread_id);

	ALU_MI(t, ip, +);
}
2324 
/* add (hi): dst += immediate, variant handled by the ALU_HI macro. */
static inline void
__instr_alu_add_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (hi)\n", p->thread_id);

	ALU_HI(t, ip, +);
}
2334 
/* sub: dst -= src; operand access generated by the ALU macro. */
static inline void
__instr_alu_sub_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] sub\n", p->thread_id);

	ALU(t, ip, -);
}
2344 
/* sub (mh): dst -= src, operand-kind variant handled by the ALU_MH macro. */
static inline void
__instr_alu_sub_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (mh)\n", p->thread_id);

	ALU_MH(t, ip, -);
}
2354 
/* sub (hm): dst -= src, operand-kind variant handled by the ALU_HM macro. */
static inline void
__instr_alu_sub_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (hm)\n", p->thread_id);

	ALU_HM(t, ip, -);
}
2364 
/* sub (hh): dst -= src, operand-kind variant handled by the ALU_HH macro. */
static inline void
__instr_alu_sub_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (hh)\n", p->thread_id);

	ALU_HH(t, ip, -);
}
2374 
/* sub (mi): dst -= immediate, variant handled by the ALU_MI macro. */
static inline void
__instr_alu_sub_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (mi)\n", p->thread_id);

	ALU_MI(t, ip, -);
}
2384 
/* sub (hi): dst -= immediate, variant handled by the ALU_HI macro. */
static inline void
__instr_alu_sub_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (hi)\n", p->thread_id);

	ALU_HI(t, ip, -);
}
2394 
/* shl: dst <<= src; operand access generated by the ALU macro. */
static inline void
__instr_alu_shl_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] shl\n", p->thread_id);

	ALU(t, ip, <<);
}
2404 
/* shl (mh): dst <<= src, operand-kind variant handled by the ALU_MH macro. */
static inline void
__instr_alu_shl_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (mh)\n", p->thread_id);

	ALU_MH(t, ip, <<);
}
2414 
/* shl (hm): dst <<= src, operand-kind variant handled by the ALU_HM macro. */
static inline void
__instr_alu_shl_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (hm)\n", p->thread_id);

	ALU_HM(t, ip, <<);
}
2424 
/* shl (hh): dst <<= src, operand-kind variant handled by the ALU_HH macro. */
static inline void
__instr_alu_shl_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (hh)\n", p->thread_id);

	ALU_HH(t, ip, <<);
}
2434 
/* shl (mi): dst <<= immediate, variant handled by the ALU_MI macro. */
static inline void
__instr_alu_shl_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (mi)\n", p->thread_id);

	ALU_MI(t, ip, <<);
}
2444 
/* shl (hi): dst <<= immediate, variant handled by the ALU_HI macro. */
static inline void
__instr_alu_shl_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (hi)\n", p->thread_id);

	ALU_HI(t, ip, <<);
}
2454 
/* shr: dst >>= src; operand access generated by the ALU macro. */
static inline void
__instr_alu_shr_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] shr\n", p->thread_id);

	ALU(t, ip, >>);
}
2464 
/* shr (mh): dst >>= src, operand-kind variant handled by the ALU_MH macro. */
static inline void
__instr_alu_shr_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (mh)\n", p->thread_id);

	ALU_MH(t, ip, >>);
}
2474 
/* shr (hm): dst >>= src, operand-kind variant handled by the ALU_HM macro. */
static inline void
__instr_alu_shr_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (hm)\n", p->thread_id);

	ALU_HM(t, ip, >>);
}
2484 
/* shr (hh): dst >>= src, operand-kind variant handled by the ALU_HH macro. */
static inline void
__instr_alu_shr_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (hh)\n", p->thread_id);

	ALU_HH(t, ip, >>);
}
2494 
/* shr (mi): dst >>= immediate, variant handled by the ALU_MI macro.
 * (The stray "Structs." comment present here previously did not apply to this
 * one-line wrapper and was removed.)
 */
static inline void
__instr_alu_shr_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (mi)\n", p->thread_id);

	ALU_MI(t, ip, >>);
}
2505 
/* shr (hi): dst >>= immediate, variant handled by the ALU_HI macro. */
static inline void
__instr_alu_shr_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (hi)\n", p->thread_id);

	ALU_HI(t, ip, >>);
}
2515 
/* and: dst &= src; operand access generated by the ALU macro. */
static inline void
__instr_alu_and_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] and\n", p->thread_id);

	ALU(t, ip, &);
}
2525 
/* and (mh): dst &= src, operand-kind variant handled by the ALU_MH macro. */
static inline void
__instr_alu_and_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] and (mh)\n", p->thread_id);

	ALU_MH(t, ip, &);
}
2535 
/* and (hm): dst &= src via the ALU_HM_FAST macro (bitwise ops use the FAST
 * macro variants; arithmetic ops above use the regular ones).
 */
static inline void
__instr_alu_and_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] and (hm)\n", p->thread_id);

	ALU_HM_FAST(t, ip, &);
}
2545 
/* and (hh): dst &= src via the ALU_HH_FAST macro. */
static inline void
__instr_alu_and_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] and (hh)\n", p->thread_id);

	ALU_HH_FAST(t, ip, &);
}
2555 
/* and (i): dst &= immediate via the ALU_I macro. */
static inline void
__instr_alu_and_i_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] and (i)\n", p->thread_id);

	ALU_I(t, ip, &);
}
2565 
/* or: dst |= src; operand access generated by the ALU macro. */
static inline void
__instr_alu_or_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] or\n", p->thread_id);

	ALU(t, ip, |);
}
2575 
/* or (mh): dst |= src, operand-kind variant handled by the ALU_MH macro. */
static inline void
__instr_alu_or_mh_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] or (mh)\n", p->thread_id);

	ALU_MH(t, ip, |);
}
2585 
/* or (hm): dst |= src via the ALU_HM_FAST macro. */
static inline void
__instr_alu_or_hm_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] or (hm)\n", p->thread_id);

	ALU_HM_FAST(t, ip, |);
}
2595 
/* or (hh): dst |= src via the ALU_HH_FAST macro. */
static inline void
__instr_alu_or_hh_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] or (hh)\n", p->thread_id);

	ALU_HH_FAST(t, ip, |);
}
2605 
/* or (i): dst |= immediate via the ALU_I macro. */
static inline void
__instr_alu_or_i_exec(struct rte_swx_pipeline *p __rte_unused,
		      struct thread *t,
		      const struct instruction *ip)
{
	TRACE("[Thread %2u] or (i)\n", p->thread_id);

	ALU_I(t, ip, |);
}
2615 
/* xor: dst ^= src; operand access generated by the ALU macro. */
static inline void
__instr_alu_xor_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] xor\n", p->thread_id);

	ALU(t, ip, ^);
}
2625 
/* xor (mh): dst ^= src, operand-kind variant handled by the ALU_MH macro. */
static inline void
__instr_alu_xor_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (mh)\n", p->thread_id);

	ALU_MH(t, ip, ^);
}
2635 
/* xor (hm): dst ^= src via the ALU_HM_FAST macro. */
static inline void
__instr_alu_xor_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (hm)\n", p->thread_id);

	ALU_HM_FAST(t, ip, ^);
}
2645 
/* xor (hh): dst ^= src via the ALU_HH_FAST macro. */
static inline void
__instr_alu_xor_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (hh)\n", p->thread_id);

	ALU_HH_FAST(t, ip, ^);
}
2655 
/* xor (i): dst ^= immediate via the ALU_I macro. */
static inline void
__instr_alu_xor_i_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (i)\n", p->thread_id);

	ALU_I(t, ip, ^);
}
2665 
/* ckadd dst src (field variant): add the value of the src field (up to 64
 * bits wide) into the 16-bit dst checksum field, using 1's complement
 * arithmetic (RFC 1071). The 1's complement sum is byte-order independent,
 * so dst is read and written without any byte swapping.
 */
static inline void
__instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p __rte_unused,
			     struct thread *t,
			     const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	/* Mask the raw 64-bit read down to the actual src field width. */
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Recover the 1's complement sum from the stored checksum (the stored
	 * checksum is the negated sum).
	 */
	r = dst;
	r = ~r & 0xFFFF;

	/* The first input (r) is a 16-bit number. The second and the third
	 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
	 * three numbers (output r) is a 34-bit number.
	 */
	r += (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is an 18-bit
	 * number. In the worst case scenario, the sum of the two numbers is a
	 * 19-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
	 * therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Negate back into checksum form; 0 and 0xFFFF represent the same
	 * value in 1's complement arithmetic, so 0 is canonicalized to 0xFFFF.
	 */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;
}
2721 
/* cksub dst src (field variant): subtract the value of the src field (up to
 * 64 bits wide) from the 16-bit dst checksum field, using 1's complement
 * arithmetic (RFC 1071). Used for incremental checksum update when a field
 * previously covered by the checksum is removed/changed.
 */
static inline void
__instr_alu_cksub_field_exec(struct rte_swx_pipeline *p __rte_unused,
			     struct thread *t,
			     const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] cksub (field)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	/* Mask the raw 64-bit read down to the actual src field width. */
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Recover the 1's complement sum from the stored checksum. */
	r = dst;
	r = ~r & 0xFFFF;

	/* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
	 * the following sequence of operations in 2's complement arithmetic:
	 * a '- b = (a - b) % 0xFFFF.
	 *
	 * In order to prevent an underflow for the below subtraction, in which
	 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
	 * minuend), we first add a multiple of the 0xFFFF modulus to the
	 * minuend. The number we add to the minuend needs to be a 34-bit number
	 * or higher, so for readability reasons we picked the 36-bit multiple.
	 * We are effectively turning the 16-bit minuend into a 36-bit number:
	 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
	 */
	r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */

	/* A 33-bit number is subtracted from a 36-bit number (the input r). The
	 * result (the output r) is a 36-bit number.
	 */
	r -= (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is a 20-bit
	 * number. Their sum is a 21-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Negate back into checksum form; canonicalize 0 to 0xFFFF (equivalent
	 * in 1's complement arithmetic).
	 */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;
}
2789 
/* ckadd dst src (20-byte struct variant): compute from scratch the RFC 1071
 * 1's complement checksum over the first 20 bytes (five 32-bit words) of the
 * src struct — e.g. an IPv4 header without options — and store it into the
 * 16-bit dst field.
 *
 * NOTE(review): the sum includes the word holding the checksum field itself,
 * so the caller presumably clears that field beforehand — confirm against the
 * instruction translation code.
 */
static inline void
__instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p __rte_unused,
				struct thread *t,
				const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;
	uint64_t r0, r1;

	TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* Two independent accumulators (r0, r1) to expose instruction-level
	 * parallelism; merged at the end.
	 */
	r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
	r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
	r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
	r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
	r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */

	/* The first input is a 16-bit number. The second input is a 19-bit
	 * number. Their sum is a 20-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* Negate into checksum form; canonicalize 0 to 0xFFFF (equivalent in
	 * 1's complement arithmetic).
	 */
	r0 = ~r0 & 0xFFFF;
	r0 = r0 ? r0 : 0xFFFF;

	*dst16_ptr = (uint16_t)r0;
}
2837 
/* ckadd dst src (generic struct variant): compute from scratch the RFC 1071
 * 1's complement checksum over the first n_bits of the src struct (a whole
 * number of 32-bit words) and store it into the 16-bit dst field.
 */
static inline void
__instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p __rte_unused,
			      struct thread *t,
			      const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;
	uint64_t r = 0;
	uint32_t i;

	TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* The bit-width analysis below assumes at most 8 = 2^3 32-bit words
	 * (i.e. a 32-byte header), yielding a sum (output r) of at most 36
	 * bits. NOTE(review): the original comment said "256-byte header",
	 * which does not match 8 words — confirm the maximum header size; even
	 * for 64 words (a 38-bit sum), the three folds below still reduce r to
	 * 16 bits.
	 */
	for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
		r += *src32_ptr;

	/* The first input is a 16-bit number. The second input is a 20-bit
	 * number. Their sum is a 21-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Negate into checksum form; canonicalize 0 to 0xFFFF (equivalent in
	 * 1's complement arithmetic).
	 */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;
}
2887 
2888 /*
2889  * Register array.
2890  */
2891 static inline uint64_t *
2892 instr_regarray_regarray(struct rte_swx_pipeline *p, const struct instruction *ip)
2893 {
2894  struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2895  return r->regarray;
2896 }
2897 
2898 static inline uint64_t
2899 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2900 {
2901  struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2902 
2903  uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
2904  uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
2905  uint64_t idx64 = *idx64_ptr;
2906  uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
2907  uint64_t idx = idx64 & idx64_mask & r->size_mask;
2908 
2909  return idx;
2910 }
2911 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read the register array index from a network-byte-order (header) field:
 * swap to host order, right-align the n_bits-wide field, then clip to the
 * array size.
 */
static inline uint64_t
instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];

	uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
	uint64_t idx64 = *idx64_ptr;
	uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;

	return idx;
}

#else

/* On big endian hosts, network byte order equals host byte order. */
#define instr_regarray_idx_nbo instr_regarray_idx_hbo

#endif
2932 
2933 static inline uint64_t
2934 instr_regarray_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
2935 {
2936  struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2937 
2938  uint64_t idx = ip->regarray.idx_val & r->size_mask;
2939 
2940  return idx;
2941 }
2942 
2943 static inline uint64_t
2944 instr_regarray_src_hbo(struct thread *t, const struct instruction *ip)
2945 {
2946  uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
2947  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
2948  uint64_t src64 = *src64_ptr;
2949  uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2950  uint64_t src = src64 & src64_mask;
2951 
2952  return src;
2953 }
2954 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read the source operand from a network-byte-order (header) field: swap to
 * host order and right-align the n_bits-wide field.
 */
static inline uint64_t
instr_regarray_src_nbo(struct thread *t, const struct instruction *ip)
{
	uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
	uint64_t src64 = *src64_ptr;
	uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);

	return src;
}

#else

/* On big endian hosts, network byte order equals host byte order. */
#define instr_regarray_src_nbo instr_regarray_src_hbo

#endif
2973 
2974 static inline void
2975 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
2976 {
2977  uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
2978  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
2979  uint64_t dst64 = *dst64_ptr;
2980  uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2981 
2982  *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
2983 
2984 }
2985 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Write a host-byte-order src value into a network-byte-order (header)
 * destination field: swap src to network order and left-align it to the
 * field position before merging it under the field mask.
 */
static inline void
instr_regarray_dst_nbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
{
	uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
	uint64_t dst64 = *dst64_ptr;
	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);

	src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
}

#else

/* On big endian hosts, network byte order equals host byte order. */
#define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set

#endif
3005 
/* regprefetch r[h]: prefetch the register whose index comes from a header
 * (network-byte-order) field.
 */
static inline void
__instr_regprefetch_rh_exec(struct rte_swx_pipeline *p,
			    struct thread *t,
			    const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	rte_prefetch0(&regarray[idx]);
}
3019 
/* regprefetch r[m]: prefetch the register whose index comes from a meta
 * (host-byte-order) field.
 */
static inline void
__instr_regprefetch_rm_exec(struct rte_swx_pipeline *p,
			    struct thread *t,
			    const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	rte_prefetch0(&regarray[idx]);
}
3033 
/* regprefetch r[i]: prefetch the register at an immediate index; the thread
 * context t is not needed, hence __rte_unused.
 */
static inline void
__instr_regprefetch_ri_exec(struct rte_swx_pipeline *p,
			    struct thread *t __rte_unused,
			    const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	rte_prefetch0(&regarray[idx]);
}
3047 
/* regrd h = r[h]: read a register into a header field; index from a header
 * (network-byte-order) field.
 */
static inline void
__instr_regrd_hrh_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
}
3061 
/* regrd h = r[m]: read a register into a header field; index from a meta
 * (host-byte-order) field.
 */
static inline void
__instr_regrd_hrm_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);

	/* Resolve array and index, then write the register value. */
	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
}
3076 
/* regrd m = r[h]: read a register into a meta field; index from a header field. */
static inline void
__instr_regrd_mrh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
}
3088 
/* regrd m = r[m]: read a register into a meta field; index from a meta field. */
static inline void
__instr_regrd_mrm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
}
3100 
/* regrd h = r[i]: read a register into a header field; immediate index. */
static inline void
__instr_regrd_hri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
}
3112 
/* regrd m = r[i]: read a register into a meta field; immediate index. */
static inline void
__instr_regrd_mri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
}
3124 
/* regwr r[h] = h: write a header-field value into the register indexed by a
 * header field.
 */
static inline void
__instr_regwr_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;
}
3137 
/* regwr r[h] = m: write a meta-field value into the register indexed by a
 * header field.
 */
static inline void
__instr_regwr_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;
}
3150 
/* regwr r[m] = h: write a header-field value into the register indexed by a
 * meta field.
 */
static inline void
__instr_regwr_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;
}
3163 
/* regwr r[m] = m: write a meta-field value into the register indexed by a
 * meta field.
 */
static inline void
__instr_regwr_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;
}
3176 
/* regwr r[h] = i: write an immediate value into the register indexed by a
 * header field.
 */
static inline void
__instr_regwr_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;
}
3189 
/* regwr r[m] = i: write an immediate value into the register indexed by a
 * meta field.
 */
static inline void
__instr_regwr_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;
}
3202 
/* regwr r[i] = h: write a header-field value into the register at an
 * immediate index.
 */
static inline void
__instr_regwr_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;
}
3215 
/* regwr r[i] = m: write a meta-field value into the register at an immediate
 * index.
 */
static inline void
__instr_regwr_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;
}
3228 
/* regwr r[i] = i: write an immediate value into the register at an immediate
 * index; thread context t unused.
 */
static inline void
__instr_regwr_rii_exec(struct rte_swx_pipeline *p,
		       struct thread *t __rte_unused,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;
}
3243 
/* regadd r[h] += h: accumulate a header-field value into the register indexed
 * by a header field.
 */
static inline void
__instr_regadd_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;
}
3256 
/* regadd r[h] += m: accumulate a meta-field value into the register indexed
 * by a header field.
 */
static inline void
__instr_regadd_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;
}
3269 
/* regadd r[m] += h: accumulate a header-field value into the register indexed
 * by a meta field.
 */
static inline void
__instr_regadd_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;
}
3282 
/* regadd r[m] += m: accumulate a meta-field value into the register indexed
 * by a meta field.
 */
static inline void
__instr_regadd_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;
}
3295 
3296 static inline void
3297 __instr_regadd_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3298 {
3299  uint64_t *regarray, idx, src;
3300 
3301  TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3302 
3303  regarray = instr_regarray_regarray(p, ip);
3304  idx = instr_regarray_idx_nbo(p, t, ip);
3305  src = ip->regarray.dstsrc_val;
3306  regarray[idx] += src;
3307 }
3308 
3309 static inline void
3310 __instr_regadd_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3311 {
3312  uint64_t *regarray, idx, src;
3313 
3314  TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3315 
3316  regarray = instr_regarray_regarray(p, ip);
3317  idx = instr_regarray_idx_hbo(p, t, ip);
3318  src = ip->regarray.dstsrc_val;
3319  regarray[idx] += src;
3320 }
3321 
3322 static inline void
3323 __instr_regadd_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3324 {
3325  uint64_t *regarray, idx, src;
3326 
3327  TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3328 
3329  regarray = instr_regarray_regarray(p, ip);
3330  idx = instr_regarray_idx_imm(p, ip);
3331  src = instr_regarray_src_nbo(t, ip);
3332  regarray[idx] += src;
3333 }
3334 
3335 static inline void
3336 __instr_regadd_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3337 {
3338  uint64_t *regarray, idx, src;
3339 
3340  TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
3341 
3342  regarray = instr_regarray_regarray(p, ip);
3343  idx = instr_regarray_idx_imm(p, ip);
3344  src = instr_regarray_src_hbo(t, ip);
3345  regarray[idx] += src;
3346 }
3347 
3348 static inline void
3349 __instr_regadd_rii_exec(struct rte_swx_pipeline *p,
3350  struct thread *t __rte_unused,
3351  const struct instruction *ip)
3352 {
3353  uint64_t *regarray, idx, src;
3354 
3355  TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3356 
3357  regarray = instr_regarray_regarray(p, ip);
3358  idx = instr_regarray_idx_imm(p, ip);
3359  src = ip->regarray.dstsrc_val;
3360  regarray[idx] += src;
3361 }
3362 
3363 /*
3364  * metarray.
3365  */
3366 static inline struct meter *
3367 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3368 {
3369  struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3370 
3371  uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3372  uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3373  uint64_t idx64 = *idx64_ptr;
3374  uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
3375  uint64_t idx = idx64 & idx64_mask & r->size_mask;
3376 
3377  return &r->metarray[idx];
3378 }
3379 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/*
 * Resolve the meter selected by a network byte order (big endian) index
 * field. On little endian hosts the loaded 64-bit word is byte-swapped,
 * then shifted right so the n_bits-wide field lands in the least
 * significant bits, and finally bounded by the meter array size mask
 * (mask-based bounding presumably relies on a power-of-2 array size —
 * confirm with the metarray allocation code).
 */
static inline struct meter *
instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];

	uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
	uint64_t idx64 = *idx64_ptr;
	uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;

	return &r->metarray[idx];
}

#else

/* Big endian host: the field is already in host order, so the HBO reader
 * is reused as-is.
 */
#define instr_meter_idx_nbo instr_meter_idx_hbo

#endif
3400 
3401 static inline struct meter *
3402 instr_meter_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
3403 {
3404  struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3405 
3406  uint64_t idx = ip->meter.idx_val & r->size_mask;
3407 
3408  return &r->metarray[idx];
3409 }
3410 
3411 static inline uint32_t
3412 instr_meter_length_hbo(struct thread *t, const struct instruction *ip)
3413 {
3414  uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3415  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3416  uint64_t src64 = *src64_ptr;
3417  uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
3418  uint64_t src = src64 & src64_mask;
3419 
3420  return (uint32_t)src;
3421 }
3422 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/*
 * Read the packet length operand from a network byte order (big endian)
 * struct field: on little endian hosts the loaded 64-bit word is
 * byte-swapped, then shifted right so the n_bits-wide field lands in the
 * least significant bits.
 */
static inline uint32_t
instr_meter_length_nbo(struct thread *t, const struct instruction *ip)
{
	uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
	uint64_t src64 = *src64_ptr;
	uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);

	return (uint32_t)src;
}

#else

/* Big endian host: the field is already in host order, so the HBO reader
 * is reused as-is.
 */
#define instr_meter_length_nbo instr_meter_length_hbo

#endif
3441 
3442 static inline enum rte_color
3443 instr_meter_color_in_hbo(struct thread *t, const struct instruction *ip)
3444 {
3445  uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
3446  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
3447  uint64_t src64 = *src64_ptr;
3448  uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
3449  uint64_t src = src64 & src64_mask;
3450 
3451  return (enum rte_color)src;
3452 }
3453 
3454 static inline void
3455 instr_meter_color_out_hbo_set(struct thread *t,
3456  const struct instruction *ip,
3457  enum rte_color color_out)
3458 {
3459  uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
3460  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
3461  uint64_t dst64 = *dst64_ptr;
3462  uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
3463 
3464  uint64_t src = (uint64_t)color_out;
3465 
3466  *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3467 }
3468 
3469 static inline void
3470 __instr_metprefetch_h_exec(struct rte_swx_pipeline *p,
3471  struct thread *t,
3472  const struct instruction *ip)
3473 {
3474  struct meter *m;
3475 
3476  TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
3477 
3478  m = instr_meter_idx_nbo(p, t, ip);
3479  rte_prefetch0(m);
3480 }
3481 
3482 static inline void
3483 __instr_metprefetch_m_exec(struct rte_swx_pipeline *p,
3484  struct thread *t,
3485  const struct instruction *ip)
3486 {
3487  struct meter *m;
3488 
3489  TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
3490 
3491  m = instr_meter_idx_hbo(p, t, ip);
3492  rte_prefetch0(m);
3493 }
3494 
3495 static inline void
3496 __instr_metprefetch_i_exec(struct rte_swx_pipeline *p,
3497  struct thread *t __rte_unused,
3498  const struct instruction *ip)
3499 {
3500  struct meter *m;
3501 
3502  TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
3503 
3504  m = instr_meter_idx_imm(p, ip);
3505  rte_prefetch0(m);
3506 }
3507 
/*
 * meter (hhm): index and packet length from network byte order fields,
 * input color from a host byte order field. Runs the color-aware trTCM
 * check at the current TSC time, masks the result with the per-meter color
 * mask, writes the output color back and bumps the per-color packet/byte
 * counters. Statement order is deliberate: prefetch and early counter
 * loads overlap memory latency with the metering computation.
 */
static inline void
__instr_meter_hhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);

	m = instr_meter_idx_nbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_nbo(t, ip);
	color_in = instr_meter_color_in_hbo(t, ip);

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3540 
/*
 * meter (hhi): index and packet length from network byte order fields,
 * input color taken from the instruction immediate. Runs the color-aware
 * trTCM check at the current TSC time, masks the result with the per-meter
 * color mask, writes the output color back and bumps the per-color
 * packet/byte counters.
 */
static inline void
__instr_meter_hhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);

	m = instr_meter_idx_nbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_nbo(t, ip);
	color_in = (enum rte_color)ip->meter.color_in_val;

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3573 
/*
 * meter (hmm): index from a network byte order field, packet length and
 * input color from host byte order fields. Runs the color-aware trTCM
 * check at the current TSC time, masks the result with the per-meter color
 * mask, writes the output color back and bumps the per-color packet/byte
 * counters.
 */
static inline void
__instr_meter_hmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);

	m = instr_meter_idx_nbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_hbo(t, ip);
	color_in = instr_meter_color_in_hbo(t, ip);

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3606 
/*
 * meter (hmi): index from a network byte order field, packet length from a
 * host byte order field, input color taken from the instruction immediate.
 * Runs the color-aware trTCM check at the current TSC time, masks the
 * result with the per-meter color mask, writes the output color back and
 * bumps the per-color packet/byte counters.
 */
static inline void
__instr_meter_hmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);

	m = instr_meter_idx_nbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_hbo(t, ip);
	color_in = (enum rte_color)ip->meter.color_in_val;

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3639 
/*
 * meter (mhm): index from a host byte order field, packet length from a
 * network byte order field, input color from a host byte order field.
 * Runs the color-aware trTCM check at the current TSC time, masks the
 * result with the per-meter color mask, writes the output color back and
 * bumps the per-color packet/byte counters.
 */
static inline void
__instr_meter_mhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);

	m = instr_meter_idx_hbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_nbo(t, ip);
	color_in = instr_meter_color_in_hbo(t, ip);

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3672 
/*
 * meter (mhi): index from a host byte order field, packet length from a
 * network byte order field, input color taken from the instruction
 * immediate. Runs the color-aware trTCM check at the current TSC time,
 * masks the result with the per-meter color mask, writes the output color
 * back and bumps the per-color packet/byte counters.
 */
static inline void
__instr_meter_mhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);

	m = instr_meter_idx_hbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_nbo(t, ip);
	color_in = (enum rte_color)ip->meter.color_in_val;

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3705 
/*
 * meter (mmm): index, packet length and input color all from host byte
 * order fields. Runs the color-aware trTCM check at the current TSC time,
 * masks the result with the per-meter color mask, writes the output color
 * back and bumps the per-color packet/byte counters.
 */
static inline void
__instr_meter_mmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);

	m = instr_meter_idx_hbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_hbo(t, ip);
	color_in = instr_meter_color_in_hbo(t, ip);

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3738 
/*
 * meter (mmi): index and packet length from host byte order fields, input
 * color taken from the instruction immediate. Runs the color-aware trTCM
 * check at the current TSC time, masks the result with the per-meter color
 * mask, writes the output color back and bumps the per-color packet/byte
 * counters.
 */
static inline void
__instr_meter_mmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);

	m = instr_meter_idx_hbo(p, t, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_hbo(t, ip);
	color_in = (enum rte_color)ip->meter.color_in_val;

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3771 
/*
 * meter (ihm): index from the instruction immediate, packet length from a
 * network byte order field, input color from a host byte order field.
 * Runs the color-aware trTCM check at the current TSC time, masks the
 * result with the per-meter color mask, writes the output color back and
 * bumps the per-color packet/byte counters.
 */
static inline void
__instr_meter_ihm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);

	m = instr_meter_idx_imm(p, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_nbo(t, ip);
	color_in = instr_meter_color_in_hbo(t, ip);

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3804 
/*
 * meter (ihi): index from the instruction immediate, packet length from a
 * network byte order field, input color taken from the instruction
 * immediate. Runs the color-aware trTCM check at the current TSC time,
 * masks the result with the per-meter color mask, writes the output color
 * back and bumps the per-color packet/byte counters.
 */
static inline void
__instr_meter_ihi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);

	m = instr_meter_idx_imm(p, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_nbo(t, ip);
	color_in = (enum rte_color)ip->meter.color_in_val;

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3837 
/*
 * meter (imm): index from the instruction immediate, packet length and
 * input color from host byte order fields. Runs the color-aware trTCM
 * check at the current TSC time, masks the result with the per-meter color
 * mask, writes the output color back and bumps the per-color packet/byte
 * counters.
 */
static inline void
__instr_meter_imm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (imm)\n", p->thread_id);

	m = instr_meter_idx_imm(p, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_hbo(t, ip);
	color_in = instr_meter_color_in_hbo(t, ip);

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3870 
/*
 * meter (imi): index from the instruction immediate, packet length from a
 * host byte order field, input color taken from the instruction immediate.
 * Runs the color-aware trTCM check at the current TSC time, masks the
 * result with the per-meter color mask, writes the output color back and
 * bumps the per-color packet/byte counters.
 */
static inline void
__instr_meter_imi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct meter *m;
	uint64_t time, n_pkts, n_bytes;
	uint32_t length;
	enum rte_color color_in, color_out;

	TRACE("[Thread %2u] meter (imi)\n", p->thread_id);

	m = instr_meter_idx_imm(p, ip);
	rte_prefetch0(m->n_pkts);
	time = rte_get_tsc_cycles();
	length = instr_meter_length_hbo(t, ip);
	color_in = (enum rte_color)ip->meter.color_in_val;

	color_out = rte_meter_trtcm_color_aware_check(&m->m,
		&m->profile->profile,
		time,
		length,
		color_in);

	/* Restrict the result to the colors enabled for this meter. */
	color_out &= m->color_mask;

	/* Load the counters before the color write to hide memory latency. */
	n_pkts = m->n_pkts[color_out];
	n_bytes = m->n_bytes[color_out];

	instr_meter_color_out_hbo_set(t, ip, color_out);

	m->n_pkts[color_out] = n_pkts + 1;
	m->n_bytes[color_out] = n_bytes + length;
}
3903 
3904 #endif
#define __rte_unused
Definition: rte_common.h:123
static uint64_t rte_get_tsc_cycles(void)
static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
Definition: rte_meter.h:539
rte_color
Definition: rte_meter.h:36
@ RTE_COLORS
Definition: rte_meter.h:40
static void rte_prefetch0(const volatile void *p)
void(* rte_swx_extern_type_destructor_t)(void *object)
void *(* rte_swx_extern_type_constructor_t)(const char *args)
int(* rte_swx_extern_func_t)(void *mailbox)
int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
#define RTE_SWX_NAME_SIZE
int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)
Definition: rte_swx_port.h:72
void(* rte_swx_port_out_flush_t)(void *port)
Definition: rte_swx_port.h:157
void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
Definition: rte_swx_port.h:147
rte_swx_table_match_type
Definition: rte_swx_table.h:23
int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, int *hit)
__rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data)
__rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)
uint8_t * pkt
Definition: rte_swx_port.h:26
uint32_t offset
Definition: rte_swx_port.h:29
uint32_t length
Definition: rte_swx_port.h:32