1160 files changed, 37523 insertions, 18372 deletions
diff --git a/Documentation/admin-guide/blockdev/drbd/figures.rst b/Documentation/admin-guide/blockdev/drbd/figures.rst index bd9a4901fe46..9f73253ea353 100644 --- a/Documentation/admin-guide/blockdev/drbd/figures.rst +++ b/Documentation/admin-guide/blockdev/drbd/figures.rst @@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions :alt: disk-states-8.dot :align: center -.. kernel-figure:: node-states-8.dot - :alt: node-states-8.dot +.. kernel-figure:: peer-states-8.dot + :alt: peer-states-8.dot :align: center diff --git a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot index bfa54e1f8016..6dc3954954d6 100644 --- a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot +++ b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot @@ -1,8 +1,3 @@ -digraph node_states { - Secondary -> Primary [ label = "ioctl_set_state()" ] - Primary -> Secondary [ label = "ioctl_set_state()" ] -} - digraph peer_states { Secondary -> Primary [ label = "recv state packet" ] Primary -> Secondary [ label = "recv state packet" ] diff --git a/Documentation/arm64/pointer-authentication.rst b/Documentation/arm64/pointer-authentication.rst index f127666ea3a8..e5dad2e40aa8 100644 --- a/Documentation/arm64/pointer-authentication.rst +++ b/Documentation/arm64/pointer-authentication.rst @@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the virtual address size configured by the kernel. For example, with a virtual address size of 48, the PAC is 7 bits wide. -Recent versions of GCC can compile code with APIAKey-based return -address protection when passed the -msign-return-address option. This -uses instructions in the HINT space (unless -march=armv8.3-a or higher -is also passed), and such code can run on systems without the pointer -authentication extension. +When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled +with HINT space pointer authentication instructions protecting +function returns. Kernels built with this option will work on hardware +with or without pointer authentication support. In addition to exec(), keys can also be reinitialized to random values using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY, diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index d0ec40d00c28..1ebf4c5c7ddc 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -3,7 +3,7 @@ BPF Type Format (BTF) ===================== 1. Introduction -*************** +=============== BTF (BPF Type Format) is the metadata format which encodes the debug info related to BPF program/map. The name BTF was used initially to describe data @@ -30,7 +30,7 @@ sections are discussed in details in :ref:`BTF_Type_String`. .. _BTF_Type_String: 2. BTF Type and String Encoding -******************************* +=============================== The file ``include/uapi/linux/btf.h`` provides high-level definition of how types/strings are encoded. @@ -57,13 +57,13 @@ little-endian target. The ``btf_header`` is designed to be extensible with generated. 2.1 String Encoding -=================== +------------------- The first string in the string section must be a null string. The rest of string table is a concatenation of other null-terminated strings. 2.2 Type Encoding -================= +----------------- The type id ``0`` is reserved for ``void`` type. 
The type section is parsed sequentially and type id is assigned to each recognized type starting from id @@ -504,7 +504,7 @@ valid index (starting from 0) pointing to a member or an argument. * ``type``: the type with ``btf_type_tag`` attribute 3. BTF Kernel API -***************** +================= The following bpf syscall commands involve BTF: * BPF_BTF_LOAD: load a blob of BTF data into kernel @@ -547,14 +547,14 @@ The workflow typically looks like: 3.1 BPF_BTF_LOAD -================ +---------------- Load a blob of BTF data into the kernel. A blob of data, described in :ref:`BTF_Type_String`, can be directly loaded into the kernel. A ``btf_fd`` is returned to userspace. 3.2 BPF_MAP_CREATE -================== +------------------ A map can be created with ``btf_fd`` and specified key/value type id.:: @@ -581,7 +581,7 @@ automatically. .. _BPF_Prog_Load: 3.3 BPF_PROG_LOAD -================= +----------------- During prog_load, func_info and line_info can be passed to the kernel with proper values for the following attributes: @@ -631,7 +631,7 @@ For line_info, the line number and column number are defined as below: #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) 3.4 BPF_{PROG,MAP}_GET_NEXT_ID -============================== +------------------------------ In the kernel, every loaded program, map or btf has a unique id. The id won't change during the lifetime of a program, map, or btf. @@ -641,13 +641,13 @@ each command, to user space, for bpf programs or maps, respectively, so an inspection tool can inspect all programs and maps. 3.5 BPF_{PROG,MAP}_GET_FD_BY_ID -=============================== +------------------------------- An introspection tool cannot use an id to get details about programs or maps. A file descriptor needs to be obtained first for reference-counting purposes. 3.6 BPF_OBJ_GET_INFO_BY_FD -========================== +-------------------------- Once a program/map fd is acquired, an introspection tool can get the detailed information from the kernel about this fd, some of which is BTF-related. For @@ -656,7 +656,7 @@ example, ``bpf_map_info`` returns ``btf_id`` and key/value type ids. bpf byte codes, and jited_line_info. 3.7 BPF_BTF_GET_FD_BY_ID -======================== +------------------------ With ``btf_id`` obtained in ``bpf_map_info`` and ``bpf_prog_info``, the bpf syscall command BPF_BTF_GET_FD_BY_ID can retrieve a btf fd. Then, with @@ -668,10 +668,10 @@ tool has full btf knowledge and is able to pretty print map key/values, dump func signatures and line info, along with byte/jit codes. 4. ELF File Format Interface -**************************** +============================ 4.1 .BTF section -================ +---------------- The .BTF section contains type and string data. The format of this section is the same as the one described in :ref:`BTF_Type_String`. @@ -679,7 +679,7 @@ same as the one described in :ref:`BTF_Type_String`. .. _BTF_Ext_Section: 4.2 .BTF.ext section -==================== +-------------------- The .BTF.ext section encodes func_info and line_info, which need loader manipulation before loading into the kernel. @@ -743,7 +743,7 @@ bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the beginning of section (``btf_ext_info_sec->sec_name_off``). 4.3 .BTF_ids section -==================== +-------------------- The .BTF_ids section encodes BTF ID values that are used within the kernel. 
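(Aside, not part of this patch: kernel code typically populates the .BTF_ids section through the macros in include/linux/btf_ids.h. A minimal sketch, using a list name taken from existing kernel sources::

    BTF_ID_LIST(bpf_skb_output_btf_ids)
    BTF_ID(struct, sk_buff)

This reserves a slot in .BTF_ids that ``resolve_btfids`` later patches with the real type id of ``struct sk_buff``.)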
@@ -804,10 +804,10 @@ All the BTF ID lists and sets are compiled in the .BTF_ids section and resolved during the linking phase of kernel build by ``resolve_btfids`` tool. 5. Using BTF -************ +============ 5.1 bpftool map pretty print -============================ +---------------------------- With BTF, the map key/value can be printed based on fields rather than simply raw bytes. This is especially valuable for large structure or if your data @@ -849,7 +849,7 @@ bpftool is able to pretty print like below: ] 5.2 bpftool prog dump -===================== +--------------------- The following is an example showing how func_info and line_info can help prog dump with better kernel symbol names, function prototypes and line @@ -883,7 +883,7 @@ information.:: [...] 5.3 Verifier Log -================ +---------------- The following is an example of how line_info can help debugging verification failure.:: @@ -909,7 +909,7 @@ failure.:: R2 offset is outside of the packet 6. BTF Generation -***************** +================= You need latest pahole @@ -1016,6 +1016,6 @@ format.:: .long 8206 # Line 8 Col 14 7. Testing -********** +========== Kernel bpf selftest `test_btf.c` provides extensive set of BTF-related tests. diff --git a/Documentation/bpf/faq.rst b/Documentation/bpf/faq.rst new file mode 100644 index 000000000000..a622602ce9ad --- /dev/null +++ b/Documentation/bpf/faq.rst @@ -0,0 +1,11 @@ +================================ +Frequently asked questions (FAQ) +================================ + +Two sets of Questions and Answers (Q&A) are maintained. + +.. toctree:: + :maxdepth: 1 + + bpf_design_QA + bpf_devel_QA diff --git a/Documentation/bpf/helpers.rst b/Documentation/bpf/helpers.rst new file mode 100644 index 000000000000..c4ee0cc20dec --- /dev/null +++ b/Documentation/bpf/helpers.rst @@ -0,0 +1,7 @@ +Helper functions +================ + +* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs. + +.. Links +.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html
\ No newline at end of file diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index 610450f59e05..91ba5a62026b 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -5,104 +5,32 @@ BPF Documentation This directory contains documentation for the BPF (Berkeley Packet Filter) facility, with a focus on the extended BPF version (eBPF). -This kernel side documentation is still work in progress. The main -textual documentation is (for historical reasons) described in -:ref:`networking-filter`, which describe both classical and extended -BPF instruction-set. +This kernel side documentation is still work in progress. The Cilium project also maintains a `BPF and XDP Reference Guide`_ that goes into great technical depth about the BPF Architecture. -libbpf -====== - -Documentation/bpf/libbpf/index.rst is a userspace library for loading and interacting with bpf programs. - -BPF Type Format (BTF) -===================== - .. toctree:: :maxdepth: 1 + instruction-set + verifier + libbpf/index btf - - -Frequently asked questions (FAQ) -================================ - -Two sets of Questions and Answers (Q&A) are maintained. - -.. toctree:: - :maxdepth: 1 - - bpf_design_QA - bpf_devel_QA - -Syscall API -=========== - -The primary info for the bpf syscall is available in the `man-pages`_ -for `bpf(2)`_. For more information about the userspace API, see -Documentation/userspace-api/ebpf/index.rst. - -Helper functions -================ - -* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs. - - -Program types -============= - -.. toctree:: - :maxdepth: 1 - - prog_cgroup_sockopt - prog_cgroup_sysctl - prog_flow_dissector - bpf_lsm - prog_sk_lookup - - -Map types -========= - -.. toctree:: - :maxdepth: 1 - - map_cgroup_storage - - -Testing and debugging BPF -========================= - -.. toctree:: - :maxdepth: 1 - - drgn - s390 - - -Licensing -========= - -.. toctree:: - :maxdepth: 1 - + faq + syscall_api + helpers + programs + maps bpf_licensing + test_debug + other +.. only:: subproject and html -Other -===== - -.. toctree:: - :maxdepth: 1 + Indices + ======= - ringbuf - llvm_reloc + * :ref:`genindex` .. Links: -.. _networking-filter: ../networking/filter.rst -.. _man-pages: https://www.kernel.org/doc/man-pages/ -.. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html -.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html .. _BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/ diff --git a/Documentation/bpf/instruction-set.rst b/Documentation/bpf/instruction-set.rst new file mode 100644 index 000000000000..fa7cba59031e --- /dev/null +++ b/Documentation/bpf/instruction-set.rst @@ -0,0 +1,467 @@ + +==================== +eBPF Instruction Set +==================== + +eBPF is designed to be JITed with one to one mapping, which can also open up +the possibility for GCC/LLVM compilers to generate optimized eBPF code through +an eBPF backend that performs almost as fast as natively compiled code. + +Some core changes of the eBPF format from classic BPF: + +- Number of registers increase from 2 to 10: + + The old format had two registers A and X, and a hidden frame pointer. The + new layout extends this to be 10 internal registers and a read-only frame + pointer. Since 64-bit CPUs are passing arguments to functions via registers + the number of args from eBPF program to in-kernel function is restricted + to 5 and one register is used to accept return value from an in-kernel + function. 
Natively, x86_64 passes the first 6 arguments in registers, aarch64/ + sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved + registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers. + + Therefore, the eBPF calling convention is defined as: + + * R0 - return value from in-kernel function, and exit value for eBPF program + * R1 - R5 - arguments from eBPF program to in-kernel function + * R6 - R9 - callee saved registers that in-kernel function will preserve + * R10 - read-only frame pointer to access stack + + Thus, all eBPF registers map one to one to HW registers on x86_64, aarch64, + etc, and the eBPF calling convention maps directly to ABIs used by the kernel on + 64-bit architectures. + + On 32-bit architectures the JIT may map programs that use only 32-bit arithmetic + and may let more complex programs be interpreted. + + R0 - R5 are scratch registers and an eBPF program needs to spill/fill them if + necessary across calls. Note that there is only one eBPF program (== one + eBPF main routine) and it cannot call other eBPF functions; it can only + call predefined in-kernel functions. + +- Register width increases from 32-bit to 64-bit: + + Still, the semantics of the original 32-bit ALU operations are preserved + via 32-bit subregisters. All eBPF registers are 64-bit with 32-bit lower + subregisters that zero-extend into 64-bit if they are being written to. + That behavior maps directly to the x86_64 and arm64 subregister definition, but + makes other JITs more difficult. + + 32-bit architectures run 64-bit eBPF programs via the interpreter. + Their JITs may convert BPF programs that only use 32-bit subregisters into + the native instruction set and let the rest be interpreted. + + Operation is 64-bit, because on 64-bit architectures, pointers are also + 64-bit wide, and we want to pass 64-bit values in/out of kernel functions, + so 32-bit eBPF registers would otherwise require defining a register-pair + ABI; thus, it would not be possible to use a direct eBPF register to HW register + mapping, and the JIT would need to do combine/split/move operations for every + register in and out of the function, which is complex, bug prone and slow. + Another reason is the use of atomic 64-bit counters. + +- Conditional jt/jf targets replaced with jt/fall-through: + + While the original design has constructs such as ``if (cond) jump_true; + else jump_false;``, they are replaced with alternative constructs like + ``if (cond) jump_true; /* else fall-through */``. + +- Introduces bpf_call insn and register passing convention for zero overhead + calls from/to other kernel functions: + + Before an in-kernel function call, the eBPF program needs to + place function arguments into R1 to R5 registers to satisfy the calling + convention, then the interpreter will take them from registers and pass + them to the in-kernel function. If R1 - R5 registers are mapped to CPU registers + that are used for argument passing on the given architecture, the JIT compiler + doesn't need to emit extra moves. Function arguments will be in the correct + registers and the BPF_CALL instruction will be JITed as a single 'call' HW + instruction. This calling convention was picked to cover common call + situations without performance penalty. + + After an in-kernel function call, R1 - R5 are reset to unreadable and R0 holds + the return value of the function. Since R6 - R9 are callee saved, their state + is preserved across the call. 
+ + For example, consider three C functions:: + + u64 f1() { return (*_f2)(1); } + u64 f2(u64 a) { return f3(a + 1, a); } + u64 f3(u64 a, u64 b) { return a - b; } + + GCC can compile f1, f3 into x86_64:: + + f1: + movl $1, %edi + movq _f2(%rip), %rax + jmp *%rax + f3: + movq %rdi, %rax + subq %rsi, %rax + ret + + Function f2 in eBPF may look like:: + + f2: + bpf_mov R2, R1 + bpf_add R1, 1 + bpf_call f3 + bpf_exit + + If f2 is JITed and the pointer is stored to ``_f2``, the calls f1 -> f2 -> f3 and + returns will be seamless. Without JIT, the __bpf_prog_run() interpreter needs to + be used to call into f2. + + For practical reasons all eBPF programs have only one argument 'ctx' which is + already placed into R1 (e.g. on __bpf_prog_run() startup) and the programs + can call kernel functions with up to 5 arguments. Calls with 6 or more arguments + are currently not supported, but these restrictions can be lifted if necessary + in the future. + + On 64-bit architectures all registers map to HW registers one to one. For + example, the x86_64 JIT compiler can map them as ... + + :: + + R0 - rax + R1 - rdi + R2 - rsi + R3 - rdx + R4 - rcx + R5 - r8 + R6 - rbx + R7 - r13 + R8 - r14 + R9 - r15 + R10 - rbp + + ... since the x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing + and rbx, r12 - r15 are callee saved. + + Then the following eBPF pseudo-program:: + + bpf_mov R6, R1 /* save ctx */ + bpf_mov R2, 2 + bpf_mov R3, 3 + bpf_mov R4, 4 + bpf_mov R5, 5 + bpf_call foo + bpf_mov R7, R0 /* save foo() return value */ + bpf_mov R1, R6 /* restore ctx for next call */ + bpf_mov R2, 6 + bpf_mov R3, 7 + bpf_mov R4, 8 + bpf_mov R5, 9 + bpf_call bar + bpf_add R0, R7 + bpf_exit + + After JIT to x86_64 it may look like:: + + push %rbp + mov %rsp,%rbp + sub $0x228,%rsp + mov %rbx,-0x228(%rbp) + mov %r13,-0x220(%rbp) + mov %rdi,%rbx + mov $0x2,%esi + mov $0x3,%edx + mov $0x4,%ecx + mov $0x5,%r8d + callq foo + mov %rax,%r13 + mov %rbx,%rdi + mov $0x6,%esi + mov $0x7,%edx + mov $0x8,%ecx + mov $0x9,%r8d + callq bar + add %r13,%rax + mov -0x228(%rbp),%rbx + mov -0x220(%rbp),%r13 + leaveq + retq + + Which is in this example equivalent in C to:: + + u64 bpf_filter(u64 ctx) + { + return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9); + } + + In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64 + arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in the proper + registers and place their return value into ``%rax`` which is R0 in eBPF. + Prologue and epilogue are emitted by the JIT and are implicit in the + interpreter. R0-R5 are scratch registers, so the eBPF program needs to preserve + them across calls as defined by the calling convention. + + For example, the following program is invalid:: + + bpf_mov R1, 1 + bpf_call foo + bpf_mov R0, R1 + bpf_exit + + After the call the registers R1-R5 contain junk values and cannot be read. + An in-kernel `eBPF verifier`_ is used to validate eBPF programs. + +Also in the new design, eBPF is limited to 4096 insns, which means that any +program will terminate quickly and will only call a fixed number of kernel +functions. Original BPF and eBPF are two operand instructions, +which helps to do one-to-one mapping between eBPF insn and x86 insn during JIT. + +The input context pointer for invoking the interpreter function is generic; +its content is defined by a specific use case. For seccomp, register R1 points +to seccomp_data; for converted BPF filters, R1 points to an skb. 
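As an aside (for reference, not part of this patch), the seccomp context just mentioned is the UAPI ``struct seccomp_data``, mirroring include/uapi/linux/seccomp.h::

    struct seccomp_data {
            int nr;                         /* system call number */
            __u32 arch;                     /* AUDIT_ARCH_* value */
            __u64 instruction_pointer;      /* CPU instruction pointer */
            __u64 args[6];                  /* up to 6 system call arguments */
    };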
+ +A program that is translated internally consists of the following elements:: + + op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32 + +So far, 87 eBPF instructions have been implemented. The 8-bit 'op' opcode field +has room for new instructions. Some of them may use 16/24/32 byte encoding. New +instructions must be a multiple of 8 bytes to preserve backward compatibility. + +eBPF is a general purpose RISC instruction set. Not every register and +every instruction are used during translation from original BPF to eBPF. +For example, socket filters are not using the ``exclusive add`` instruction, but +tracing filters may, e.g. to maintain counters of events. Register R9 +is not used by socket filters either, but more complex filters may be running +out of registers and would have to resort to spill/fill to stack. + +eBPF can be used as a generic assembler for last step performance +optimizations; socket filters and seccomp are using it as an assembler. Tracing +filters may use it as an assembler to generate code from within the kernel. In-kernel usage +may not be bounded by security considerations, since generated eBPF code +may be optimizing an internal code path and is not exposed to user space. +Safety of eBPF can come from the `eBPF verifier`_. In such use cases as +described, it may be used as a safe instruction set. + +Just like the original BPF, eBPF runs within a controlled environment, +is deterministic and the kernel can easily prove that. The safety of the program +can be determined in two steps: the first step does depth-first-search to disallow +loops and other CFG validation; the second step starts from the first insn and +descends all possible paths. It simulates execution of every insn and observes +the state change of registers and stack. + +eBPF opcode encoding +==================== + +eBPF reuses most of the opcode encoding from classic BPF to simplify conversion +of classic BPF to eBPF. For arithmetic and jump instructions the 8-bit 'code' +field is divided into three parts:: + + +----------------+--------+--------------------+ + | 4 bits | 1 bit | 3 bits | + | operation code | source | instruction class | + +----------------+--------+--------------------+ + (MSB) (LSB) + +The three LSB bits store the instruction class, which is one of: + + =================== =============== + Classic BPF classes eBPF classes + =================== =============== + BPF_LD 0x00 BPF_LD 0x00 + BPF_LDX 0x01 BPF_LDX 0x01 + BPF_ST 0x02 BPF_ST 0x02 + BPF_STX 0x03 BPF_STX 0x03 + BPF_ALU 0x04 BPF_ALU 0x04 + BPF_JMP 0x05 BPF_JMP 0x05 + BPF_RET 0x06 BPF_JMP32 0x06 + BPF_MISC 0x07 BPF_ALU64 0x07 + =================== =============== + +When BPF_CLASS(code) == BPF_ALU or BPF_JMP, the 4th bit encodes the source operand ... + + :: + + BPF_K 0x00 + BPF_X 0x08 + + * in classic BPF, this means:: + + BPF_SRC(code) == BPF_X - use register X as source operand + BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand + + * in eBPF, this means:: + + BPF_SRC(code) == BPF_X - use 'src_reg' register as source operand + BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand + +... and the four MSB bits store the operation code. 
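In C terms, this 64-bit layout is ``struct bpf_insn``, and the class/source/operation fields above are extracted with the BPF_CLASS/BPF_SRC/BPF_OP helper macros. For reference (an aside mirroring include/uapi/linux/bpf.h and include/uapi/linux/bpf_common.h, not part of this patch)::

    struct bpf_insn {
            __u8    code;           /* opcode */
            __u8    dst_reg:4;      /* dest register */
            __u8    src_reg:4;      /* source register */
            __s16   off;            /* signed offset */
            __s32   imm;            /* signed immediate constant */
    };

    #define BPF_CLASS(code) ((code) & 0x07)   /* 3 LSB: instruction class */
    #define BPF_SRC(code)   ((code) & 0x08)   /* 1 bit: BPF_K or BPF_X */
    #define BPF_OP(code)    ((code) & 0xf0)   /* 4 MSB: operation code */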
+ +If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:: + + BPF_ADD 0x00 + BPF_SUB 0x10 + BPF_MUL 0x20 + BPF_DIV 0x30 + BPF_OR 0x40 + BPF_AND 0x50 + BPF_LSH 0x60 + BPF_RSH 0x70 + BPF_NEG 0x80 + BPF_MOD 0x90 + BPF_XOR 0xa0 + BPF_MOV 0xb0 /* eBPF only: mov reg to reg */ + BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */ + BPF_END 0xd0 /* eBPF only: endianness conversion */ + +If BPF_CLASS(code) == BPF_JMP or BPF_JMP32 [ in eBPF ], BPF_OP(code) is one of:: + + BPF_JA 0x00 /* BPF_JMP only */ + BPF_JEQ 0x10 + BPF_JGT 0x20 + BPF_JGE 0x30 + BPF_JSET 0x40 + BPF_JNE 0x50 /* eBPF only: jump != */ + BPF_JSGT 0x60 /* eBPF only: signed '>' */ + BPF_JSGE 0x70 /* eBPF only: signed '>=' */ + BPF_CALL 0x80 /* eBPF BPF_JMP only: function call */ + BPF_EXIT 0x90 /* eBPF BPF_JMP only: function return */ + BPF_JLT 0xa0 /* eBPF only: unsigned '<' */ + BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */ + BPF_JSLT 0xc0 /* eBPF only: signed '<' */ + BPF_JSLE 0xd0 /* eBPF only: signed '<=' */ + +So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF +and eBPF. There are only two registers in classic BPF, so it means A += X. +In eBPF it means dst_reg = (u32) dst_reg + (u32) src_reg; similarly, +BPF_XOR | BPF_K | BPF_ALU means A ^= imm32 in classic BPF and analogous +src_reg = (u32) src_reg ^ (u32) imm32 in eBPF. + +Classic BPF is using BPF_MISC class to represent A = X and X = A moves. +eBPF is using BPF_MOV | BPF_X | BPF_ALU code instead. Since there are no +BPF_MISC operations in eBPF, the class 7 is used as BPF_ALU64 to mean +exactly the same operations as BPF_ALU, but with 64-bit wide operands +instead. So BPF_ADD | BPF_X | BPF_ALU64 means 64-bit addition, i.e.: +dst_reg = dst_reg + src_reg + +Classic BPF wastes the whole BPF_RET class to represent a single ``ret`` +operation. Classic BPF_RET | BPF_K means copy imm32 into return register +and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT +in eBPF means function exit only. The eBPF program needs to store return +value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is used as +BPF_JMP32 to mean exactly the same operations as BPF_JMP, but with 32-bit wide +operands for the comparisons instead. + +For load and store instructions the 8-bit 'code' field is divided as:: + + +--------+--------+-------------------+ + | 3 bits | 2 bits | 3 bits | + | mode | size | instruction class | + +--------+--------+-------------------+ + (MSB) (LSB) + +Size modifier is one of ... + +:: + + BPF_W 0x00 /* word */ + BPF_H 0x08 /* half word */ + BPF_B 0x10 /* byte */ + BPF_DW 0x18 /* eBPF only, double word */ + +... which encodes size of load/store operation:: + + B - 1 byte + H - 2 byte + W - 4 byte + DW - 8 byte (eBPF only) + +Mode modifier is one of:: + + BPF_IMM 0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */ + BPF_ABS 0x20 + BPF_IND 0x40 + BPF_MEM 0x60 + BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */ + BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */ + BPF_ATOMIC 0xc0 /* eBPF only, atomic operations */ + +eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and +(BPF_IND | <size> | BPF_LD) which are used to access packet data. + +They had to be carried over from classic to have strong performance of +socket filters running in eBPF interpreter. These instructions can only +be used when interpreter context is a pointer to ``struct sk_buff`` and +have seven implicit operands. Register R6 is an implicit input that must +contain pointer to sk_buff. 
Register R0 is an implicit output which contains +the data fetched from the packet. Registers R1-R5 are scratch registers +and must not be used to store the data across BPF_ABS | BPF_LD or +BPF_IND | BPF_LD instructions. + +These instructions have an implicit program exit condition as well. When an +eBPF program tries to access data beyond the packet boundary, +the interpreter will abort the execution of the program. JIT compilers +therefore must preserve this property. The src_reg and imm32 fields are +explicit inputs to these instructions. + +For example:: + + BPF_IND | BPF_W | BPF_LD means: + + R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32)) + and R1 - R5 were scratched. + +Unlike the classic BPF instruction set, eBPF has generic load/store operations:: + + BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg + BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32 + BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off) + +Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. + +It also includes atomic operations, which use the immediate field for extra +encoding:: + + .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg + .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg + +The basic atomic operations supported are:: + + BPF_ADD + BPF_AND + BPF_OR + BPF_XOR + +Each has semantics equivalent to the ``BPF_ADD`` example, that is: the +memory location addressed by ``dst_reg + off`` is atomically modified, with +``src_reg`` as the other operand. If the ``BPF_FETCH`` flag is set in the +immediate, then these operations also overwrite ``src_reg`` with the +value that was in memory before it was modified. + +The more special operations are:: + + BPF_XCHG + +This atomically exchanges ``src_reg`` with the value addressed by ``dst_reg + +off``. :: + + BPF_CMPXCHG + +This atomically compares the value addressed by ``dst_reg + off`` with +``R0``. If they match it is replaced with ``src_reg``. In either case, the +value that was there before is zero-extended and loaded back to ``R0``. + +Note that 1 and 2 byte atomic operations are not supported. + +Clang can generate atomic instructions by default when ``-mcpu=v3`` is +enabled. If a lower version for ``-mcpu`` is set, the only atomic instruction +Clang can generate is ``BPF_ADD`` *without* ``BPF_FETCH``. If you need to enable +the atomics features while keeping a lower ``-mcpu`` version, you can use +``-Xclang -target-feature -Xclang +alu32``. + +You may encounter ``BPF_XADD`` - this is a legacy name for ``BPF_ATOMIC``, +referring to the exclusive-add operation encoded when the immediate field is +zero. + +eBPF has one 16-byte instruction: ``BPF_LD | BPF_DW | BPF_IMM`` which consists +of two consecutive ``struct bpf_insn`` 8-byte blocks and is interpreted as a single +instruction that loads a 64-bit immediate value into dst_reg. +Classic BPF has a similar instruction: ``BPF_LD | BPF_W | BPF_IMM`` which loads a +32-bit immediate value into a register. + +.. Links: +.. _eBPF verifier: verifier.rst diff --git a/Documentation/bpf/libbpf/index.rst b/Documentation/bpf/libbpf/index.rst index 4f8adfc3ab83..4e8c656b539a 100644 --- a/Documentation/bpf/libbpf/index.rst +++ b/Documentation/bpf/libbpf/index.rst @@ -3,8 +3,6 @@ libbpf ====== -For API documentation see the `versioned API documentation site <https://libbpf.readthedocs.io/en/latest/api.html>`_. -
.. toctree:: :maxdepth: 1 @@ -14,6 +12,8 @@ For API documentation see the `versioned API documentation site <https://libbpf. This is documentation for libbpf, a userspace library for loading and interacting with bpf programs. +For API documentation see the `versioned API documentation site <https://libbpf.readthedocs.io/en/latest/api.html>`_. + All general BPF questions, including kernel functionality, libbpf APIs and their application, should be sent to the bpf@vger.kernel.org mailing list. You can `subscribe <http://vger.kernel.org/vger-lists.html#bpf>`_ to the diff --git a/Documentation/bpf/maps.rst b/Documentation/bpf/maps.rst new file mode 100644 index 000000000000..f41619e312ac --- /dev/null +++ b/Documentation/bpf/maps.rst @@ -0,0 +1,52 @@ + +========= +eBPF maps +========= + +'maps' are generic storage of different types for sharing data between the kernel +and userspace. + +The maps are accessed from user space via the BPF syscall, which has the following commands: + +- create a map with a given type and attributes + ``map_fd = bpf(BPF_MAP_CREATE, union bpf_attr *attr, u32 size)`` + using attr->map_type, attr->key_size, attr->value_size, attr->max_entries + returns a process-local file descriptor or a negative error + +- look up a key in a given map + ``err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size)`` + using attr->map_fd, attr->key, attr->value + returns zero and stores the found elem into value, or a negative error + +- create or update a key/value pair in a given map + ``err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size)`` + using attr->map_fd, attr->key, attr->value + returns zero or a negative error + +- find and delete an element by key in a given map + ``err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size)`` + using attr->map_fd, attr->key + +- to delete a map: close(fd) + An exiting process will delete its maps automatically + +Userspace programs use this syscall to create/access maps that eBPF programs +are concurrently updating. + +Maps can have different types: hash, array, bloom filter, radix-tree, etc. + +The map is defined by: + + - type + - max number of elements + - key size in bytes + - value size in bytes + +Map Types +========= + +.. toctree:: + :maxdepth: 1 + :glob: + + map_*
\ No newline at end of file diff --git a/Documentation/bpf/other.rst b/Documentation/bpf/other.rst new file mode 100644 index 000000000000..3d61963403b4 --- /dev/null +++ b/Documentation/bpf/other.rst @@ -0,0 +1,9 @@ +===== +Other +===== + +.. toctree:: + :maxdepth: 1 + + ringbuf + llvm_reloc
\ No newline at end of file diff --git a/Documentation/bpf/bpf_lsm.rst b/Documentation/bpf/prog_lsm.rst index 0dc3fb0d9544..0dc3fb0d9544 100644 --- a/Documentation/bpf/bpf_lsm.rst +++ b/Documentation/bpf/prog_lsm.rst diff --git a/Documentation/bpf/programs.rst b/Documentation/bpf/programs.rst new file mode 100644 index 000000000000..620eb667ac7a --- /dev/null +++ b/Documentation/bpf/programs.rst @@ -0,0 +1,9 @@ +============= +Program Types +============= + +.. toctree:: + :maxdepth: 1 + :glob: + + prog_* diff --git a/Documentation/bpf/syscall_api.rst b/Documentation/bpf/syscall_api.rst new file mode 100644 index 000000000000..f0a1dff087ad --- /dev/null +++ b/Documentation/bpf/syscall_api.rst @@ -0,0 +1,11 @@ +=========== +Syscall API +=========== + +The primary info for the bpf syscall is available in the `man-pages`_ +for `bpf(2)`_. For more information about the userspace API, see +Documentation/userspace-api/ebpf/index.rst. + +.. Links: +.. _man-pages: https://www.kernel.org/doc/man-pages/ +.. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html
\ No newline at end of file diff --git a/Documentation/bpf/test_debug.rst b/Documentation/bpf/test_debug.rst new file mode 100644 index 000000000000..ebf0caceb6a6 --- /dev/null +++ b/Documentation/bpf/test_debug.rst @@ -0,0 +1,9 @@ +========================= +Testing and debugging BPF +========================= + +.. toctree:: + :maxdepth: 1 + + drgn + s390 diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst new file mode 100644 index 000000000000..fae5f6273bac --- /dev/null +++ b/Documentation/bpf/verifier.rst @@ -0,0 +1,529 @@ + +============= +eBPF verifier +============= + +The safety of the eBPF program is determined in two steps. + +The first step does a DAG check to disallow loops and other CFG validation. +In particular it will detect programs that have unreachable instructions +(though the classic BPF checker allows them). + +The second step starts from the first insn and descends all possible paths. +It simulates execution of every insn and observes the state change of +registers and stack. + +At the start of the program the register R1 contains a pointer to context +and has type PTR_TO_CTX. +If the verifier sees an insn that does R2=R1, then R2 now has type +PTR_TO_CTX as well and can be used on the right hand side of an expression. +If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=SCALAR_VALUE, +since the addition of two valid pointers makes an invalid pointer. +(In 'secure' mode the verifier will reject any type of pointer arithmetic to make +sure that kernel addresses don't leak to unprivileged users) + +If a register was never written to, it's not readable:: + + bpf_mov R0 = R2 + bpf_exit + +will be rejected, since R2 is unreadable at the start of the program. + +After a kernel function call, R1-R5 are reset to unreadable and +R0 has the return type of the function. + +Since R6-R9 are callee saved, their state is preserved across the call. + +:: + + bpf_mov R6 = 1 + bpf_call foo + bpf_mov R0 = R6 + bpf_exit + +is a correct program. If there was R1 instead of R6, it would have +been rejected. + +Load/store instructions are allowed only with registers of valid types, which +are PTR_TO_CTX, PTR_TO_MAP, PTR_TO_STACK. They are bounds and alignment checked. +For example:: + + bpf_mov R1 = 1 + bpf_mov R2 = 2 + bpf_xadd *(u32 *)(R1 + 3) += R2 + bpf_exit + +will be rejected, since R1 doesn't have a valid pointer type at the time of +execution of instruction bpf_xadd. + +At the start R1's type is PTR_TO_CTX (a pointer to the generic ``struct bpf_context``). +A callback is used to customize the verifier to restrict eBPF program access to only +certain fields within the ctx structure, with specified size and alignment. + +For example, the following insn:: + + bpf_ld R0 = *(u32 *)(R6 + 8) + +intends to load a word from address R6 + 8 and store it into R0. +If R6=PTR_TO_CTX, via the is_valid_access() callback the verifier will know +that offset 8 of size 4 bytes can be accessed for reading, otherwise +the verifier will reject the program. +If R6=PTR_TO_STACK, then access should be aligned and be within +stack bounds, which are [-MAX_BPF_STACK, 0). In this example offset is 8, +so it will fail verification, since it's out of bounds. + +The verifier will allow an eBPF program to read data from the stack only after +it has written to it. + +The classic BPF verifier does a similar check with the M[0-15] memory slots. +For example:: + + bpf_ld R0 = *(u32 *)(R10 - 4) + bpf_exit + +is an invalid program. +Though R10 is a correct read-only register and has type PTR_TO_STACK +and R10 - 4 is within stack bounds, there were no stores into that location. 
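To make the ctx-access rule above concrete: a context-access callback has roughly this shape (an illustrative sketch only; the kernel's per-program-type is_valid_access() hooks take additional parameters)::

    /* Allow only an aligned 4-byte read of the field at ctx offset 8. */
    static bool sample_is_valid_access(int off, int size,
                                       enum bpf_access_type type)
    {
            if (type != BPF_READ)
                    return false;
            return off == 8 && size == 4;
    }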
+ +Pointer register spill/fill is tracked as well, since four (R6-R9) +callee saved registers may not be enough for some programs. + +Allowed function calls are customized with bpf_verifier_ops->get_func_proto(). +The eBPF verifier will check that registers match argument constraints. +After the call, register R0 will be set to the return type of the function. + +Function calls are the main mechanism to extend the functionality of eBPF programs. +Socket filters may let programs call one set of functions, whereas tracing +filters may allow a completely different set. + +If a function is made accessible to eBPF programs, it needs to be thought through +from a safety point of view. The verifier will guarantee that the function is +called with valid arguments. + +Seccomp and socket filters have different security restrictions for classic BPF. +Seccomp solves this with a two-stage verifier: the classic BPF verifier is followed +by the seccomp verifier. In the case of eBPF, one configurable verifier is shared for +all use cases. + +See the details of the eBPF verifier in kernel/bpf/verifier.c. + +Register value tracking +======================= + +In order to determine the safety of an eBPF program, the verifier must track +the range of possible values in each register and also in each stack slot. +This is done with ``struct bpf_reg_state``, defined in +include/linux/bpf_verifier.h, which unifies tracking of scalar and pointer values. Each +register state has a type, which is either NOT_INIT (the register has not been +written to), SCALAR_VALUE (some value which is not usable as a pointer), or a +pointer type. The types of pointers describe their base, as follows: + + PTR_TO_CTX + Pointer to bpf_context. + CONST_PTR_TO_MAP + Pointer to struct bpf_map. "Const" because arithmetic + on these pointers is forbidden. + PTR_TO_MAP_VALUE + Pointer to the value stored in a map element. + PTR_TO_MAP_VALUE_OR_NULL + Either a pointer to a map value, or NULL; map accesses + (see maps.rst) return this type, which becomes a + PTR_TO_MAP_VALUE when checked != NULL. Arithmetic on + these pointers is forbidden. + PTR_TO_STACK + Frame pointer. + PTR_TO_PACKET + skb->data. + PTR_TO_PACKET_END + skb->data + headlen; arithmetic forbidden. + PTR_TO_SOCKET + Pointer to struct bpf_sock_ops, implicitly refcounted. + PTR_TO_SOCKET_OR_NULL + Either a pointer to a socket, or NULL; socket lookup + returns this type, which becomes a PTR_TO_SOCKET when + checked != NULL. PTR_TO_SOCKET is reference-counted, + so programs must release the reference through the + socket release function before the end of the program. + Arithmetic on these pointers is forbidden. + +However, a pointer may be offset from this base (as a result of pointer +arithmetic), and this is tracked in two parts: the 'fixed offset' and 'variable +offset'. The former is used when an exactly-known value (e.g. an immediate +operand) is added to a pointer, while the latter is used for values which are +not exactly known. The variable offset is also used in SCALAR_VALUEs, to track +the range of possible values in the register. + +The verifier's knowledge about the variable offset consists of: + +* minimum and maximum values as unsigned +* minimum and maximum values as signed + +* knowledge of the values of individual bits, in the form of a 'tnum': a u64 + 'mask' and a u64 'value'. 1s in the mask represent bits whose value is unknown; + 1s in the value represent bits known to be 1. Bits known to be 0 have 0 in both + mask and value; no bit should ever be 1 in both. 
For example, if a byte is read + into a register from memory, the register's top 56 bits are known zero, while + the low 8 are unknown - which is represented as the tnum (0x0; 0xff). If we + then OR this with 0x40, we get (0x40; 0xbf), then if we add 1 we get (0x0; + 0x1ff), because of potential carries. + +Besides arithmetic, the register state can also be updated by conditional +branches. For instance, if a SCALAR_VALUE is compared > 8, in the 'true' branch +it will have a umin_value (unsigned minimum value) of 9, whereas in the 'false' +branch it will have a umax_value of 8. A signed compare (with BPF_JSGT or +BPF_JSGE) would instead update the signed minimum/maximum values. Information +from the signed and unsigned bounds can be combined; for instance if a value is +first tested < 8 and then tested s> 4, the verifier will conclude that the value +is also > 4 and s< 8, since the bounds prevent crossing the sign boundary. + +PTR_TO_PACKETs with a variable offset part have an 'id', which is common to all +pointers sharing that same variable offset. This is important for packet range +checks: after adding a variable to a packet pointer register A, if you then copy +it to another register B and then add a constant 4 to A, both registers will +share the same 'id' but A will have a fixed offset of +4. Then if A is +bounds-checked and found to be less than a PTR_TO_PACKET_END, the register B is +now known to have a safe range of at least 4 bytes. See 'Direct packet access', +below, for more on PTR_TO_PACKET ranges. + +The 'id' field is also used on PTR_TO_MAP_VALUE_OR_NULL, common to all copies of +the pointer returned from a map lookup. This means that when one copy is +checked and found to be non-NULL, all copies can become PTR_TO_MAP_VALUEs. +As well as range-checking, the tracked information is also used for enforcing +alignment of pointer accesses. For instance, on most systems the packet pointer +is 2 bytes after a 4-byte alignment. If a program adds 14 bytes to that to jump +over the Ethernet header, then reads IHL and adds (IHL * 4), the resulting +pointer will have a variable offset known to be 4n+2 for some n, so adding the 2 +bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through +that pointer are safe. +The 'id' field is also used on PTR_TO_SOCKET and PTR_TO_SOCKET_OR_NULL, common +to all copies of the pointer returned from a socket lookup. This has similar +behaviour to the handling for PTR_TO_MAP_VALUE_OR_NULL->PTR_TO_MAP_VALUE, but +it also handles reference tracking for the pointer. PTR_TO_SOCKET implicitly +represents a reference to the corresponding ``struct sock``. To ensure that the +reference is not leaked, it is imperative to NULL-check the reference and, in +the non-NULL case, pass the valid reference to the socket release function. + +Direct packet access +==================== + +In cls_bpf and act_bpf programs the verifier allows direct access to the packet +data via skb->data and skb->data_end pointers. 
+Ex:: + + 1: r4 = *(u32 *)(r1 +80) /* load skb->data_end */ + 2: r3 = *(u32 *)(r1 +76) /* load skb->data */ + 3: r5 = r3 + 4: r5 += 14 + 5: if r5 > r4 goto pc+16 + R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp + 6: r0 = *(u16 *)(r3 +12) /* access 12 and 13 bytes of the packet */ + +this 2byte load from the packet is safe to do, since the program author +did check ``if (skb->data + 14 > skb->data_end) goto err`` at insn #5 which +means that in the fall-through case the register R3 (which points to skb->data) +has at least 14 directly accessible bytes. The verifier marks it +as R3=pkt(id=0,off=0,r=14). +id=0 means that no additional variables were added to the register. +off=0 means that no additional constants were added. +r=14 is the range of safe access which means that bytes [R3, R3 + 14) are ok. +Note that R5 is marked as R5=pkt(id=0,off=14,r=14). It also points +to the packet data, but constant 14 was added to the register, so +it now points to ``skb->data + 14`` and accessible range is [R5, R5 + 14 - 14) +which is zero bytes. + +More complex packet access may look like:: + + + R0=inv1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp + 6: r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */ + 7: r4 = *(u8 *)(r3 +12) + 8: r4 *= 14 + 9: r3 = *(u32 *)(r1 +76) /* load skb->data */ + 10: r3 += r4 + 11: r2 = r1 + 12: r2 <<= 48 + 13: r2 >>= 48 + 14: r3 += r2 + 15: r2 = r3 + 16: r2 += 8 + 17: r1 = *(u32 *)(r1 +80) /* load skb->data_end */ + 18: if r2 > r1 goto pc+2 + R0=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)) R5=pkt(id=0,off=14,r=14) R10=fp + 19: r1 = *(u8 *)(r3 +4) + +The state of the register R3 is R3=pkt(id=2,off=0,r=8) +id=2 means that two ``r3 += rX`` instructions were seen, so r3 points to some +offset within a packet and since the program author did +``if (r3 + 8 > r1) goto err`` at insn #18, the safe range is [R3, R3 + 8). +The verifier only allows 'add'/'sub' operations on packet registers. Any other +operation will set the register state to 'SCALAR_VALUE' and it won't be +available for direct packet access. + +Operation ``r3 += rX`` may overflow and become less than original skb->data, +therefore the verifier has to prevent that. So when it sees ``r3 += rX`` +instruction and rX is more than 16-bit value, any subsequent bounds-check of r3 +against skb->data_end will not give us 'range' information, so attempts to read +through the pointer will give "invalid access to packet" error. + +Ex. after insn ``r4 = *(u8 *)(r3 +12)`` (insn #7 above) the state of r4 is +R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) which means that upper 56 bits +of the register are guaranteed to be zero, and nothing is known about the lower +8 bits. After insn ``r4 *= 14`` the state becomes +R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)), since multiplying an 8-bit +value by constant 14 will keep upper 52 bits as zero, also the least significant +bit will be zero as 14 is even. Similarly ``r2 >>= 48`` will make +R2=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)), since the shift is not sign +extending. This logic is implemented in adjust_reg_min_max_vals() function, +which calls adjust_ptr_min_max_vals() for adding pointer to scalar (or vice +versa) and adjust_scalar_min_max_vals() for operations on two scalars. 
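The ``var_off`` annotations in the traces above are tnums. As an aside (mirroring ``struct tnum`` from include/linux/tnum.h; the helper below is an illustrative sketch, not the kernel's API)::

    struct tnum {
            u64 value;      /* bits known to be 1 */
            u64 mask;       /* bits whose value is unknown */
    };

    /* OR with a constant: known-1 bits leave the unknown mask, so
     * (0x0; 0xff) | 0x40 becomes (0x40; 0xbf), as in the byte-load
     * example earlier. */
    static struct tnum tnum_or_const(struct tnum a, u64 c)
    {
            return (struct tnum){ .value = a.value | c, .mask = a.mask & ~c };
    }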
+ +The end result is that a bpf program author can access the packet directly +using normal C code as:: + + void *data = (void *)(long)skb->data; + void *data_end = (void *)(long)skb->data_end; + struct eth_hdr *eth = data; + struct iphdr *iph = data + sizeof(*eth); + struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph); + + if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end) + return 0; + if (eth->h_proto != htons(ETH_P_IP)) + return 0; + if (iph->protocol != IPPROTO_UDP || iph->ihl != 5) + return 0; + if (udp->dest == 53 || udp->source == 9) + ...; + +which makes such programs easier to write compared to the LD_ABS insn, +and significantly faster. + +Pruning +======= + +The verifier does not actually walk all possible paths through the program. For +each new branch to analyse, the verifier looks at all the states it's previously +been in when at this instruction. If any of them contain the current state as a +subset, the branch is 'pruned' - that is, the fact that the previous state was +accepted implies the current state would be as well. For instance, if in the +previous state, r1 held a packet-pointer, and in the current state, r1 holds a +packet-pointer with a range as long or longer and at least as strict an +alignment, then r1 is safe. Similarly, if r2 was NOT_INIT before then it can't +have been used by any path from that point, so any value in r2 (including +another NOT_INIT) is safe. The implementation is in the function regsafe(). +Pruning considers not only the registers but also the stack (and any spilled +registers it may hold). They must all be safe for the branch to be pruned. +This is implemented in states_equal(). + +Understanding eBPF verifier messages +==================================== + +The following are a few examples of invalid eBPF programs and verifier error +messages as seen in the log: + +Program with unreachable instructions:: + + static struct bpf_insn prog[] = { + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }; + +Error:: + + unreachable insn 1 + +Program that reads an uninitialized register:: + + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + +Error:: + + 0: (bf) r0 = r2 + R2 !read_ok + +Program that doesn't initialize R0 before exiting:: + + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + +Error:: + + 0: (bf) r2 = r1 + 1: (95) exit + R0 !read_ok + +Program that accesses stack out of bounds:: + + BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), + BPF_EXIT_INSN(), + +Error:: + + 0: (7a) *(u64 *)(r10 +8) = 0 + invalid stack off=8 size=8 + +Program that doesn't initialize stack before passing its address into a function:: + + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + +Error:: + + 0: (bf) r2 = r10 + 1: (07) r2 += -8 + 2: (b7) r1 = 0x0 + 3: (85) call 1 + invalid indirect read from stack off -8+0 size 8 + +Program that uses invalid map_fd=0 while calling the map_lookup_elem() function:: + + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + +Error:: + + 0: (7a) *(u64 *)(r10 -8) = 0 + 1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 0x0 + 4: (85) call 1 + fd 0 is not pointing to valid bpf_map + +Program that doesn't check the return value of map_lookup_elem() before accessing +map element:: + + BPF_ST_MEM(BPF_DW, 
BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + +Error:: + + 0: (7a) *(u64 *)(r10 -8) = 0 + 1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 0x0 + 4: (85) call 1 + 5: (7a) *(u64 *)(r0 +0) = 0 + R0 invalid mem access 'map_value_or_null' + +Program that correctly checks map_lookup_elem() returned value for NULL, but +accesses the memory with incorrect alignment:: + + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), + BPF_EXIT_INSN(), + +Error:: + + 0: (7a) *(u64 *)(r10 -8) = 0 + 1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 1 + 4: (85) call 1 + 5: (15) if r0 == 0x0 goto pc+1 + R0=map_ptr R10=fp + 6: (7a) *(u64 *)(r0 +4) = 0 + misaligned access off 4 size 8 + +Program that correctly checks map_lookup_elem() returned value for NULL and +accesses memory with correct alignment in one side of 'if' branch, but fails +to do so in the other side of 'if' branch:: + + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + +Error:: + + 0: (7a) *(u64 *)(r10 -8) = 0 + 1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 1 + 4: (85) call 1 + 5: (15) if r0 == 0x0 goto pc+2 + R0=map_ptr R10=fp + 6: (7a) *(u64 *)(r0 +0) = 0 + 7: (95) exit + + from 5 to 8: R0=imm0 R10=fp + 8: (7a) *(u64 *)(r0 +0) = 1 + R0 invalid mem access 'imm' + +Program that performs a socket lookup then sets the pointer to NULL without +checking it:: + + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + +Error:: + + 0: (b7) r2 = 0 + 1: (63) *(u32 *)(r10 -8) = r2 + 2: (bf) r2 = r10 + 3: (07) r2 += -8 + 4: (b7) r3 = 4 + 5: (b7) r4 = 0 + 6: (b7) r5 = 0 + 7: (85) call bpf_sk_lookup_tcp#65 + 8: (b7) r0 = 0 + 9: (95) exit + Unreleased reference id=1, alloc_insn=7 + +Program that performs a socket lookup but does not NULL-check the returned +value:: + + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), + BPF_EXIT_INSN(), + +Error:: + + 0: (b7) r2 = 0 + 1: (63) *(u32 *)(r10 -8) = r2 + 2: (bf) r2 = r10 + 3: (07) r2 += -8 + 4: (b7) r3 = 4 + 5: (b7) r4 = 0 + 6: (b7) r5 = 0 + 7: (85) call bpf_sk_lookup_tcp#65 + 8: (95) exit + Unreleased reference id=1, alloc_insn=7 diff --git a/Documentation/conf.py b/Documentation/conf.py index 17f7cee56987..76e5eb5cb62b 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -249,11 +249,16 @@ 
except ImportError: html_static_path = ['sphinx-static'] -html_context = { - 'css_files': [ - '_static/theme_overrides.css', - ], -} +html_css_files = [ + 'theme_overrides.css', +] + +if major <= 1 and minor < 8: + html_context = { + 'css_files': [ + '_static/theme_overrides.css', + ], + } # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied diff --git a/Documentation/cpu-freq/core.rst b/Documentation/cpu-freq/core.rst index 33cb90bd1d8f..4ceef8e7217c 100644 --- a/Documentation/cpu-freq/core.rst +++ b/Documentation/cpu-freq/core.rst @@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE. The third argument is a struct cpufreq_freqs with the following values: -===== =========================== -cpu number of the affected CPU +====== ====================================== +policy a pointer to the struct cpufreq_policy old old frequency new new frequency flags flags of the cpufreq driver -===== =========================== +====== ====================================== 3. CPUFreq Table Generation with Operating Performance Point (OPP) ================================================================== diff --git a/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml index a95960ee3feb..c93fe9d3ea82 100644 --- a/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml +++ b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml @@ -17,6 +17,7 @@ properties: - const: allwinner,sun7i-a20-can - const: allwinner,sun4i-a10-can - const: allwinner,sun4i-a10-can + - const: allwinner,sun8i-r40-can reg: maxItems: 1 @@ -27,6 +28,19 @@ properties: clocks: maxItems: 1 + resets: + maxItems: 1 + +if: + properties: + compatible: + contains: + const: allwinner,sun8i-r40-can + +then: + required: + - resets + required: - compatible - reg @@ -47,5 +61,15 @@ examples: interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; clocks = <&ccu CLK_APB1_CAN>; }; + - | + #define RST_BUS_CAN 68 + #define CLK_BUS_CAN 91 + can1: can@1c2bc00 { + compatible = "allwinner,sun8i-r40-can"; + reg = <0x01c2bc00 0x400>; + interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&ccu CLK_BUS_CAN>; + resets = <&ccu RST_BUS_CAN>; + }; ... diff --git a/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml new file mode 100644 index 000000000000..702df848a71d --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/dsa/dsa-port.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Ethernet Switch port Device Tree Bindings + +maintainers: + - Andrew Lunn <andrew@lunn.ch> + - Florian Fainelli <f.fainelli@gmail.com> + - Vivien Didelot <vivien.didelot@gmail.com> + +description: + Ethernet switch port Description + +allOf: + - $ref: "http://devicetree.org/schemas/net/ethernet-controller.yaml#" + +properties: + reg: + description: Port number + + label: + description: + Describes the label associated with this port, which will become + the netdev name + $ref: /schemas/types.yaml#/definitions/string + + link: + description: + Should be a list of phandles to other switch's DSA port. This + port is used as the outgoing port towards the phandle ports. 
The + full routing information must be given, not just the one hop + routes to neighbouring switches + $ref: /schemas/types.yaml#/definitions/phandle-array + + ethernet: + description: + Should be a phandle to a valid Ethernet device node. This host + device is what the switch port is connected to + $ref: /schemas/types.yaml#/definitions/phandle + + dsa-tag-protocol: + description: + Instead of the default, the switch will use this tag protocol if + possible. Useful when a device supports multiple protocols and + the default is incompatible with the Ethernet device. + enum: + - dsa + - edsa + - ocelot + - ocelot-8021q + - seville + + phy-handle: true + + phy-mode: true + + fixed-link: true + + mac-address: true + + sfp: true + + managed: true + + rx-internal-delay-ps: true + + tx-internal-delay-ps: true + +required: + - reg + +additionalProperties: true + +... diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.yaml b/Documentation/devicetree/bindings/net/dsa/dsa.yaml index 2ad7f79ad371..b9d48e357e77 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.yaml +++ b/Documentation/devicetree/bindings/net/dsa/dsa.yaml @@ -46,65 +46,9 @@ patternProperties: type: object description: Ethernet switch ports - allOf: - - $ref: "http://devicetree.org/schemas/net/ethernet-controller.yaml#" + $ref: dsa-port.yaml# - properties: - reg: - description: Port number - - label: - description: - Describes the label associated with this port, which will become - the netdev name - $ref: /schemas/types.yaml#/definitions/string - - link: - description: - Should be a list of phandles to other switch's DSA port. This - port is used as the outgoing port towards the phandle ports. The - full routing information must be given, not just the one hop - routes to neighbouring switches - $ref: /schemas/types.yaml#/definitions/phandle-array - - ethernet: - description: - Should be a phandle to a valid Ethernet device node. This host - device is what the switch port is connected to - $ref: /schemas/types.yaml#/definitions/phandle - - dsa-tag-protocol: - description: - Instead of the default, the switch will use this tag protocol if - possible. Useful when a device supports multiple protocols and - the default is incompatible with the Ethernet device. - enum: - - dsa - - edsa - - ocelot - - ocelot-8021q - - seville - - phy-handle: true - - phy-mode: true - - fixed-link: true - - mac-address: true - - sfp: true - - managed: true - - rx-internal-delay-ps: true - - tx-internal-delay-ps: true - - required: - - reg - - additionalProperties: false + unevaluatedProperties: false oneOf: - required: diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml index 48de0ace265d..89c21b289447 100644 --- a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml +++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml @@ -99,40 +99,9 @@ patternProperties: type: object description: Ethernet switch ports - properties: - reg: - description: Port number - - label: - description: - Describes the label associated with this port, which will become - the netdev name - $ref: /schemas/types.yaml#/definitions/string - - link: - description: - Should be a list of phandles to other switch's DSA port. This - port is used as the outgoing port towards the phandle ports. 
The - full routing information must be given, not just the one hop - routes to neighbouring switches - $ref: /schemas/types.yaml#/definitions/phandle-array - - ethernet: - description: - Should be a phandle to a valid Ethernet device node. This host - device is what the switch port is connected to - $ref: /schemas/types.yaml#/definitions/phandle - - phy-handle: true - - phy-mode: true - - fixed-link: true - - mac-address: true - - sfp: true + $ref: dsa-port.yaml# + properties: qca,sgmii-rxclk-falling-edge: $ref: /schemas/types.yaml#/definitions/flag description: @@ -154,10 +123,7 @@ patternProperties: SGMII on the QCA8337, it is advised to set this unless a communication issue is observed. - required: - - reg - - additionalProperties: false + unevaluatedProperties: false oneOf: - required: diff --git a/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml b/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml index d54dc183a033..5bee665d5fcf 100644 --- a/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml +++ b/Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml @@ -56,12 +56,21 @@ properties: ethernet-ports: type: object + + properties: + '#address-cells': + const: 1 + '#size-cells': + const: 0 + + additionalProperties: false + patternProperties: "^port@[0-9a-f]+$": type: object - allOf: - - $ref: "http://devicetree.org/schemas/net/ethernet-controller.yaml#" + $ref: "/schemas/net/ethernet-controller.yaml#" + unevaluatedProperties: false properties: '#address-cells': diff --git a/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml b/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml new file mode 100644 index 000000000000..8156a9aeb589 --- /dev/null +++ b/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/net/vertexcom-mse102x.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: The Vertexcom MSE102x (SPI) Device Tree Bindings + +maintainers: + - Stefan Wahren <stefan.wahren@in-tech.com> + +description: + Vertexcom's MSE102x are a family of HomePlug GreenPHY chips. + They can be connected either via RGMII, RMII or SPI to a host CPU. + + In order to use a MSE102x chip as SPI device, it must be defined as + a child of an SPI master device in the device tree. 
+ + More information can be found at + http://www.vertexcom.com/doc/MSE1022%20Product%20Brief.pdf + +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + enum: + - vertexcom,mse1021 + - vertexcom,mse1022 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + spi-cpha: true + + spi-cpol: true + + spi-max-frequency: + minimum: 6000000 + maximum: 7142857 + +required: + - compatible + - reg + - interrupts + - spi-cpha + - spi-cpol + - spi-max-frequency + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + spi0 { + #address-cells = <1>; + #size-cells = <0>; + + ethernet@0 { + compatible = "vertexcom,mse1021"; + reg = <0>; + interrupt-parent = <&gpio>; + interrupts = <23 IRQ_TYPE_EDGE_RISING>; + spi-cpha; + spi-cpol; + spi-max-frequency = <7142857>; + }; + }; diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml index 7f987e79337c..52a78a2e362e 100644 --- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml +++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml @@ -33,6 +33,7 @@ properties: - rockchip,rk3328-spi - rockchip,rk3368-spi - rockchip,rk3399-spi + - rockchip,rk3568-spi - rockchip,rv1126-spi - const: rockchip,rk3066-spi diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 5f4ca46bfb13..0d1679b4ff99 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -1274,6 +1274,8 @@ patternProperties: description: Variscite Ltd. "^vdl,.*": description: Van der Laan b.v. + "^vertexcom,.*": + description: Vertexcom Technologies, Inc. "^via,.*": description: VIA Technologies, Inc. "^videostrong,.*": diff --git a/Documentation/filesystems/cifs/ksmbd.rst b/Documentation/filesystems/cifs/ksmbd.rst index a1326157d53f..b0d354fd8066 100644 --- a/Documentation/filesystems/cifs/ksmbd.rst +++ b/Documentation/filesystems/cifs/ksmbd.rst @@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon) -------------------------------- ksmbd.mountd is userspace process to, transfer user account and password that -are registered using ksmbd.adduser(part of utils for user space). Further it +are registered using ksmbd.adduser (part of utils for user space). Further it allows sharing information parameters that parsed from smb.conf to ksmbd in kernel. For the execution part it has a daemon which is continuously running and connected to the kernel interface using netlink socket, it waits for the -requests(dcerpc and share/user info). It handles RPC calls (at a minimum few +requests (dcerpc and share/user info). It handles RPC calls (at a minimum few dozen) that are most important for file server from NetShareEnum and NetServerGetInfo. Complete DCE/RPC response is prepared from the user space and passed over to the associated kernel thread for the client. @@ -154,11 +154,11 @@ Each layer 1. Enable all component prints # sudo ksmbd.control -d "all" -2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma) +2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma) # sudo ksmbd.control -d "smb" -3. Show what prints are enable. - # cat/sys/class/ksmbd-control/debug +3. Show what prints are enabled. + # cat /sys/class/ksmbd-control/debug [smb] auth vfs oplock ipc conn [rdma] 4. 
Disable prints: diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index bb68d39f03b7..375baca7edcd 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ================================= -NETWORK FILESYSTEM HELPER LIBRARY +Network Filesystem Helper Library ================================= .. Contents: @@ -37,22 +37,22 @@ into a common call framework. The following services are provided: - * Handles transparent huge pages (THPs). + * Handle folios that span multiple pages. - * Insulates the netfs from VM interface changes. + * Insulate the netfs from VM interface changes. - * Allows the netfs to arbitrarily split reads up into pieces, even ones that - don't match page sizes or page alignments and that may cross pages. + * Allow the netfs to arbitrarily split reads up into pieces, even ones that + don't match folio sizes or folio alignments and that may cross folios. - * Allows the netfs to expand a readahead request in both directions to meet - its needs. + * Allow the netfs to expand a readahead request in both directions to meet its + needs. - * Allows the netfs to partially fulfil a read, which will then be resubmitted. + * Allow the netfs to partially fulfil a read, which will then be resubmitted. - * Handles local caching, allowing cached data and server-read data to be + * Handle local caching, allowing cached data and server-read data to be interleaved for a single request. - * Handles clearing of bufferage that aren't on the server. + * Handle clearing of bufferage that aren't on the server. * Handle retrying of reads that failed, switching reads from the cache to the server as necessary. @@ -70,22 +70,22 @@ Read Helper Functions Three read helpers are provided:: - * void netfs_readahead(struct readahead_control *ractl, - const struct netfs_read_request_ops *ops, - void *netfs_priv);`` - * int netfs_readpage(struct file *file, - struct page *page, - const struct netfs_read_request_ops *ops, - void *netfs_priv); - * int netfs_write_begin(struct file *file, - struct address_space *mapping, - loff_t pos, - unsigned int len, - unsigned int flags, - struct page **_page, - void **_fsdata, - const struct netfs_read_request_ops *ops, - void *netfs_priv); + void netfs_readahead(struct readahead_control *ractl, + const struct netfs_read_request_ops *ops, + void *netfs_priv); + int netfs_readpage(struct file *file, + struct folio *folio, + const struct netfs_read_request_ops *ops, + void *netfs_priv); + int netfs_write_begin(struct file *file, + struct address_space *mapping, + loff_t pos, + unsigned int len, + unsigned int flags, + struct folio **_folio, + void **_fsdata, + const struct netfs_read_request_ops *ops, + void *netfs_priv); Each corresponds to a VM operation, with the addition of a couple of parameters for the use of the read helpers: @@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure. For ->readahead() and ->readpage(), the network filesystem should just jump into the corresponding read helper; whereas for ->write_begin(), it may be a little more complicated as the network filesystem might want to flush -conflicting writes or track dirty data and needs to put the acquired page if an -error occurs after calling the helper. +conflicting writes or track dirty data and needs to put the acquired folio if +an error occurs after calling the helper. 
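As a purely illustrative sketch, a network filesystem's own ->readpage() might
jump into the helper like this (the myfs_* names and the myfs_req_ops table
are hypothetical; the operations table itself is described below)::

	static int myfs_readpage(struct file *file, struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* Hand the folio straight to netfslib, which calls back
		 * through myfs_req_ops to issue the actual reads. */
		return netfs_readpage(file, folio, &myfs_req_ops, NULL);
	}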
The helpers manage the read request, calling back into the network filesystem
through the supplied table of operations.  Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
 	void (*issue_op)(struct netfs_read_subrequest *subreq);
 	bool (*is_still_valid)(struct netfs_read_request *rreq);
 	int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-				 struct page *page, void **_fsdata);
+				 struct folio *folio, void **_fsdata);
 	void (*done)(struct netfs_read_request *rreq);
 	void (*cleanup)(struct address_space *mapping, void *netfs_priv);
 };
@@ -313,13 +313,14 @@ The operations are as follows:

   There is no return value; the netfs_subreq_terminated() function should be
   called to indicate whether or not the operation succeeded and how much data
-  it transferred.  The filesystem also should not deal with setting pages
+  it transferred.  The filesystem also should not deal with setting folios
   uptodate, unlocking them or dropping their refs - the helpers need to deal
   with this as they have to coordinate with copying to the local cache.

-  Note that the helpers have the pages locked, but not pinned.  It is possible
-  to use the ITER_XARRAY iov iterator to refer to the range of the inode that
-  is being operated upon without the need to allocate large bvec tables.
+  Note that the helpers have the folios locked, but not pinned.  It is
+  possible to use the ITER_XARRAY iov iterator to refer to the range of the
+  inode that is being operated upon without the need to allocate large bvec
+  tables.

 * ``is_still_valid()``

@@ -330,15 +331,15 @@ The operations are as follows:
 * ``check_write_begin()``

   [Optional] This is called from the netfs_write_begin() helper once it has
-  allocated/grabbed the page to be modified to allow the filesystem to flush
+  allocated/grabbed the folio to be modified to allow the filesystem to flush
   conflicting state before allowing it to be modified.

-  It should return 0 if everything is now fine, -EAGAIN if the page should be
+  It should return 0 if everything is now fine, -EAGAIN if the folio should be
   regrabbed and any other error code to abort the operation.

 * ``done``

-  [Optional] This is called after the pages in the request have all been
+  [Optional] This is called after the folios in the request have all been
   unlocked (and marked uptodate if applicable).

 * ``cleanup``

@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
     * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
       end of the slice instead of reissuing.

- * Once the data is read, the pages that have been fully read/cleared:
+ * Once the data is read, the folios that have been fully read/cleared:

   * Will be marked uptodate.

@@ -398,11 +399,11 @@

   * Unlocked

- * Any pages that need writing to the cache will then have DIO writes issued.
+ * Any folios that need writing to the cache will then have DIO writes issued.

   * Synchronous operations will wait for reading to be complete.

- * Writes to the cache will proceed asynchronously and the pages will have the
+ * Writes to the cache will proceed asynchronously and the folios will have the
   PG_fscache mark removed when that completes.

 * The request structures will be cleaned up when everything has completed.
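As a minimal sketch of such an operations table (again with hypothetical
myfs_* names, and assuming ``issue_op()`` is the only hook that must be
supplied, while the [Optional] hooks above may be left unset)::

	static void myfs_req_issue_op(struct netfs_read_subrequest *subreq)
	{
		/* A real filesystem would start an (often asynchronous) read
		 * of subreq->len bytes at subreq->start here and call
		 * netfs_subreq_terminated() from its completion path; this
		 * sketch just reports an immediate failure. */
		netfs_subreq_terminated(subreq, -EIO, false);
	}

	static const struct netfs_read_request_ops myfs_req_ops = {
		.issue_op	= myfs_req_issue_op,
	};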
@@ -452,6 +453,9 @@ operation table looks like the following:: netfs_io_terminated_t term_func, void *term_func_priv); + int (*prepare_write)(struct netfs_cache_resources *cres, + loff_t *_start, size_t *_len, loff_t i_size); + int (*write)(struct netfs_cache_resources *cres, loff_t start_pos, struct iov_iter *iter, @@ -509,6 +513,14 @@ The methods defined in the table are: indicating whether the termination is definitely happening in the caller's context. + * ``prepare_write()`` + + [Required] Called to adjust a write to the cache and check that there is + sufficient space in the cache. The start and length values indicate the + size of the write that netfslib is proposing, and this can be adjusted by + the cache to respect DIO boundaries. The file size is passed for + information. + * ``write()`` [Required] Called to write to the cache. The start file offset is given @@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where there isn't a read request structure as well, such as writing dirty data to the cache. + +API Function Reference +====================== + .. kernel-doc:: include/linux/netfs.h +.. kernel-doc:: fs/netfs/read_helper.c diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst index ddada4a53749..4fd7b70fcde1 100644 --- a/Documentation/locking/locktypes.rst +++ b/Documentation/locking/locktypes.rst @@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels:: spin_lock(&p->lock); p->count += this_cpu_read(var2); -On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable() -which makes the above code fully equivalent. On a PREEMPT_RT kernel migrate_disable() ensures that the task is pinned on the current CPU which in turn guarantees that the per-CPU access to var1 and var2 are staying on -the same CPU. +the same CPU while the task remains preemptible. The migrate_disable() substitution is not valid for the following scenario:: @@ -456,9 +454,8 @@ scenario:: p = this_cpu_ptr(&var1); p->val = func2(); -While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because -here migrate_disable() does not protect against reentrancy from a -preempting task. A correct substitution for this case is:: +This breaks because migrate_disable() does not protect against reentrancy from +a preempting task. A correct substitution for this case is:: func() { diff --git a/Documentation/networking/filter.rst b/Documentation/networking/filter.rst index ce2b8e8bb9ab..43cdc4d34745 100644 --- a/Documentation/networking/filter.rst +++ b/Documentation/networking/filter.rst @@ -6,6 +6,13 @@ Linux Socket Filtering aka Berkeley Packet Filter (BPF) ======================================================= +Notice +------ + +This file used to document the eBPF format and mechanisms even when not +related to socket filtering. The ../bpf/index.rst has more details +on eBPF. + Introduction ------------ @@ -617,15 +624,11 @@ format with similar underlying principles from BPF described in previous paragraphs is being used. However, the instruction set format is modelled closer to the underlying architecture to mimic native instruction sets, so that a better performance can be achieved (more details later). This new -ISA is called 'eBPF' or 'internal BPF' interchangeably. (Note: eBPF which +ISA is called eBPF. See the ../bpf/index.rst for details. (Note: eBPF which originates from [e]xtended BPF is not the same as BPF extensions! 
While eBPF is an ISA, BPF extensions date back to classic BPF's 'overloading' of BPF_LD | BPF_{B,H,W} | BPF_ABS instruction.) -It is designed to be JITed with one to one mapping, which can also open up -the possibility for GCC/LLVM compilers to generate optimized eBPF code through -an eBPF backend that performs almost as fast as natively compiled code. - The new instruction set was originally designed with the possible goal in mind to write programs in "restricted C" and compile into eBPF with a optional GCC/LLVM backend, so that it can just-in-time map to modern 64-bit CPUs with @@ -650,1032 +653,11 @@ Currently, the classic BPF format is being used for JITing on most sparc64, arm32, riscv64, riscv32 perform JIT compilation from eBPF instruction set. -Some core changes of the new internal format: - -- Number of registers increase from 2 to 10: - - The old format had two registers A and X, and a hidden frame pointer. The - new layout extends this to be 10 internal registers and a read-only frame - pointer. Since 64-bit CPUs are passing arguments to functions via registers - the number of args from eBPF program to in-kernel function is restricted - to 5 and one register is used to accept return value from an in-kernel - function. Natively, x86_64 passes first 6 arguments in registers, aarch64/ - sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6 callee saved - registers, and aarch64/sparcv9/mips64 have 11 or more callee saved registers. - - Therefore, eBPF calling convention is defined as: - - * R0 - return value from in-kernel function, and exit value for eBPF program - * R1 - R5 - arguments from eBPF program to in-kernel function - * R6 - R9 - callee saved registers that in-kernel function will preserve - * R10 - read-only frame pointer to access stack - - Thus, all eBPF registers map one to one to HW registers on x86_64, aarch64, - etc, and eBPF calling convention maps directly to ABIs used by the kernel on - 64-bit architectures. - - On 32-bit architectures JIT may map programs that use only 32-bit arithmetic - and may let more complex programs to be interpreted. - - R0 - R5 are scratch registers and eBPF program needs spill/fill them if - necessary across calls. Note that there is only one eBPF program (== one - eBPF main routine) and it cannot call other eBPF functions, it can only - call predefined in-kernel functions, though. - -- Register width increases from 32-bit to 64-bit: - - Still, the semantics of the original 32-bit ALU operations are preserved - via 32-bit subregisters. All eBPF registers are 64-bit with 32-bit lower - subregisters that zero-extend into 64-bit if they are being written to. - That behavior maps directly to x86_64 and arm64 subregister definition, but - makes other JITs more difficult. - - 32-bit architectures run 64-bit internal BPF programs via interpreter. - Their JITs may convert BPF programs that only use 32-bit subregisters into - native instruction set and let the rest being interpreted. - - Operation is 64-bit, because on 64-bit architectures, pointers are also - 64-bit wide, and we want to pass 64-bit values in/out of kernel functions, - so 32-bit eBPF registers would otherwise require to define register-pair - ABI, thus, there won't be able to use a direct eBPF register to HW register - mapping and JIT would need to do combine/split/move operations for every - register in and out of the function, which is complex, bug prone and slow. - Another reason is the use of atomic 64-bit counters. 
- -- Conditional jt/jf targets replaced with jt/fall-through: - - While the original design has constructs such as ``if (cond) jump_true; - else jump_false;``, they are being replaced into alternative constructs like - ``if (cond) jump_true; /* else fall-through */``. - -- Introduces bpf_call insn and register passing convention for zero overhead - calls from/to other kernel functions: - - Before an in-kernel function call, the internal BPF program needs to - place function arguments into R1 to R5 registers to satisfy calling - convention, then the interpreter will take them from registers and pass - to in-kernel function. If R1 - R5 registers are mapped to CPU registers - that are used for argument passing on given architecture, the JIT compiler - doesn't need to emit extra moves. Function arguments will be in the correct - registers and BPF_CALL instruction will be JITed as single 'call' HW - instruction. This calling convention was picked to cover common call - situations without performance penalty. - - After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has - a return value of the function. Since R6 - R9 are callee saved, their state - is preserved across the call. - - For example, consider three C functions:: - - u64 f1() { return (*_f2)(1); } - u64 f2(u64 a) { return f3(a + 1, a); } - u64 f3(u64 a, u64 b) { return a - b; } - - GCC can compile f1, f3 into x86_64:: - - f1: - movl $1, %edi - movq _f2(%rip), %rax - jmp *%rax - f3: - movq %rdi, %rax - subq %rsi, %rax - ret - - Function f2 in eBPF may look like:: - - f2: - bpf_mov R2, R1 - bpf_add R1, 1 - bpf_call f3 - bpf_exit - - If f2 is JITed and the pointer stored to ``_f2``. The calls f1 -> f2 -> f3 and - returns will be seamless. Without JIT, __bpf_prog_run() interpreter needs to - be used to call into f2. - - For practical reasons all eBPF programs have only one argument 'ctx' which is - already placed into R1 (e.g. on __bpf_prog_run() startup) and the programs - can call kernel functions with up to 5 arguments. Calls with 6 or more arguments - are currently not supported, but these restrictions can be lifted if necessary - in the future. - - On 64-bit architectures all register map to HW registers one to one. For - example, x86_64 JIT compiler can map them as ... - - :: - - R0 - rax - R1 - rdi - R2 - rsi - R3 - rdx - R4 - rcx - R5 - r8 - R6 - rbx - R7 - r13 - R8 - r14 - R9 - r15 - R10 - rbp - - ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing - and rbx, r12 - r15 are callee saved. 
- - Then the following internal BPF pseudo-program:: - - bpf_mov R6, R1 /* save ctx */ - bpf_mov R2, 2 - bpf_mov R3, 3 - bpf_mov R4, 4 - bpf_mov R5, 5 - bpf_call foo - bpf_mov R7, R0 /* save foo() return value */ - bpf_mov R1, R6 /* restore ctx for next call */ - bpf_mov R2, 6 - bpf_mov R3, 7 - bpf_mov R4, 8 - bpf_mov R5, 9 - bpf_call bar - bpf_add R0, R7 - bpf_exit - - After JIT to x86_64 may look like:: - - push %rbp - mov %rsp,%rbp - sub $0x228,%rsp - mov %rbx,-0x228(%rbp) - mov %r13,-0x220(%rbp) - mov %rdi,%rbx - mov $0x2,%esi - mov $0x3,%edx - mov $0x4,%ecx - mov $0x5,%r8d - callq foo - mov %rax,%r13 - mov %rbx,%rdi - mov $0x6,%esi - mov $0x7,%edx - mov $0x8,%ecx - mov $0x9,%r8d - callq bar - add %r13,%rax - mov -0x228(%rbp),%rbx - mov -0x220(%rbp),%r13 - leaveq - retq - - Which is in this example equivalent in C to:: - - u64 bpf_filter(u64 ctx) - { - return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9); - } - - In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64 - arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in proper - registers and place their return value into ``%rax`` which is R0 in eBPF. - Prologue and epilogue are emitted by JIT and are implicit in the - interpreter. R0-R5 are scratch registers, so eBPF program needs to preserve - them across the calls as defined by calling convention. - - For example the following program is invalid:: - - bpf_mov R1, 1 - bpf_call foo - bpf_mov R0, R1 - bpf_exit - - After the call the registers R1-R5 contain junk values and cannot be read. - An in-kernel eBPF verifier is used to validate internal BPF programs. - -Also in the new design, eBPF is limited to 4096 insns, which means that any -program will terminate quickly and will only call a fixed number of kernel -functions. Original BPF and the new format are two operand instructions, -which helps to do one-to-one mapping between eBPF insn and x86 insn during JIT. - -The input context pointer for invoking the interpreter function is generic, -its content is defined by a specific use case. For seccomp register R1 points -to seccomp_data, for converted BPF filters R1 points to a skb. - -A program, that is translated internally consists of the following elements:: - - op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32 - -So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field -has room for new instructions. Some of them may use 16/24/32 byte encoding. New -instructions must be multiple of 8 bytes to preserve backward compatibility. - -Internal BPF is a general purpose RISC instruction set. Not every register and -every instruction are used during translation from original BPF to new format. -For example, socket filters are not using ``exclusive add`` instruction, but -tracing filters may do to maintain counters of events, for example. Register R9 -is not used by socket filters either, but more complex filters may be running -out of registers and would have to resort to spill/fill to stack. - -Internal BPF can be used as a generic assembler for last step performance -optimizations, socket filters and seccomp are using it as assembler. Tracing -filters may use it as assembler to generate code from kernel. In kernel usage -may not be bounded by security considerations, since generated internal BPF code -may be optimizing internal code path and not being exposed to the user space. -Safety of internal BPF can come from a verifier (TBD). In such use cases as -described, it may be used as safe instruction set. 
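For reference, the translated element layout shown above (op:8, dst_reg:4,
src_reg:4, off:16, imm:32) is exactly what ``struct bpf_insn`` in
include/uapi/linux/bpf.h encodes::

	struct bpf_insn {
		__u8	code;		/* opcode */
		__u8	dst_reg:4;	/* dest register */
		__u8	src_reg:4;	/* source register */
		__s16	off;		/* signed offset */
		__s32	imm;		/* signed immediate constant */
	};

Every instruction occupies one such 8-byte slot; the 16-byte
BPF_LD | BPF_DW | BPF_IMM form described later simply uses two consecutive
slots.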
- -Just like the original BPF, the new format runs within a controlled environment, -is deterministic and the kernel can easily prove that. The safety of the program -can be determined in two steps: first step does depth-first-search to disallow -loops and other CFG validation; second step starts from the first insn and -descends all possible paths. It simulates execution of every insn and observes -the state change of registers and stack. - -eBPF opcode encoding --------------------- - -eBPF is reusing most of the opcode encoding from classic to simplify conversion -of classic BPF to eBPF. For arithmetic and jump instructions the 8-bit 'code' -field is divided into three parts:: - - +----------------+--------+--------------------+ - | 4 bits | 1 bit | 3 bits | - | operation code | source | instruction class | - +----------------+--------+--------------------+ - (MSB) (LSB) - -Three LSB bits store instruction class which is one of: - - =================== =============== - Classic BPF classes eBPF classes - =================== =============== - BPF_LD 0x00 BPF_LD 0x00 - BPF_LDX 0x01 BPF_LDX 0x01 - BPF_ST 0x02 BPF_ST 0x02 - BPF_STX 0x03 BPF_STX 0x03 - BPF_ALU 0x04 BPF_ALU 0x04 - BPF_JMP 0x05 BPF_JMP 0x05 - BPF_RET 0x06 BPF_JMP32 0x06 - BPF_MISC 0x07 BPF_ALU64 0x07 - =================== =============== - -When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ... - - :: - - BPF_K 0x00 - BPF_X 0x08 - - * in classic BPF, this means:: - - BPF_SRC(code) == BPF_X - use register X as source operand - BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand - - * in eBPF, this means:: - - BPF_SRC(code) == BPF_X - use 'src_reg' register as source operand - BPF_SRC(code) == BPF_K - use 32-bit immediate as source operand - -... and four MSB bits store operation code. - -If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:: - - BPF_ADD 0x00 - BPF_SUB 0x10 - BPF_MUL 0x20 - BPF_DIV 0x30 - BPF_OR 0x40 - BPF_AND 0x50 - BPF_LSH 0x60 - BPF_RSH 0x70 - BPF_NEG 0x80 - BPF_MOD 0x90 - BPF_XOR 0xa0 - BPF_MOV 0xb0 /* eBPF only: mov reg to reg */ - BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */ - BPF_END 0xd0 /* eBPF only: endianness conversion */ - -If BPF_CLASS(code) == BPF_JMP or BPF_JMP32 [ in eBPF ], BPF_OP(code) is one of:: - - BPF_JA 0x00 /* BPF_JMP only */ - BPF_JEQ 0x10 - BPF_JGT 0x20 - BPF_JGE 0x30 - BPF_JSET 0x40 - BPF_JNE 0x50 /* eBPF only: jump != */ - BPF_JSGT 0x60 /* eBPF only: signed '>' */ - BPF_JSGE 0x70 /* eBPF only: signed '>=' */ - BPF_CALL 0x80 /* eBPF BPF_JMP only: function call */ - BPF_EXIT 0x90 /* eBPF BPF_JMP only: function return */ - BPF_JLT 0xa0 /* eBPF only: unsigned '<' */ - BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */ - BPF_JSLT 0xc0 /* eBPF only: signed '<' */ - BPF_JSLE 0xd0 /* eBPF only: signed '<=' */ - -So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF -and eBPF. There are only two registers in classic BPF, so it means A += X. -In eBPF it means dst_reg = (u32) dst_reg + (u32) src_reg; similarly, -BPF_XOR | BPF_K | BPF_ALU means A ^= imm32 in classic BPF and analogous -src_reg = (u32) src_reg ^ (u32) imm32 in eBPF. - -Classic BPF is using BPF_MISC class to represent A = X and X = A moves. -eBPF is using BPF_MOV | BPF_X | BPF_ALU code instead. Since there are no -BPF_MISC operations in eBPF, the class 7 is used as BPF_ALU64 to mean -exactly the same operations as BPF_ALU, but with 64-bit wide operands -instead. 
So BPF_ADD | BPF_X | BPF_ALU64 means 64-bit addition, i.e.: -dst_reg = dst_reg + src_reg - -Classic BPF wastes the whole BPF_RET class to represent a single ``ret`` -operation. Classic BPF_RET | BPF_K means copy imm32 into return register -and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT -in eBPF means function exit only. The eBPF program needs to store return -value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is used as -BPF_JMP32 to mean exactly the same operations as BPF_JMP, but with 32-bit wide -operands for the comparisons instead. - -For load and store instructions the 8-bit 'code' field is divided as:: - - +--------+--------+-------------------+ - | 3 bits | 2 bits | 3 bits | - | mode | size | instruction class | - +--------+--------+-------------------+ - (MSB) (LSB) - -Size modifier is one of ... - -:: - - BPF_W 0x00 /* word */ - BPF_H 0x08 /* half word */ - BPF_B 0x10 /* byte */ - BPF_DW 0x18 /* eBPF only, double word */ - -... which encodes size of load/store operation:: - - B - 1 byte - H - 2 byte - W - 4 byte - DW - 8 byte (eBPF only) - -Mode modifier is one of:: - - BPF_IMM 0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */ - BPF_ABS 0x20 - BPF_IND 0x40 - BPF_MEM 0x60 - BPF_LEN 0x80 /* classic BPF only, reserved in eBPF */ - BPF_MSH 0xa0 /* classic BPF only, reserved in eBPF */ - BPF_ATOMIC 0xc0 /* eBPF only, atomic operations */ - -eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and -(BPF_IND | <size> | BPF_LD) which are used to access packet data. - -They had to be carried over from classic to have strong performance of -socket filters running in eBPF interpreter. These instructions can only -be used when interpreter context is a pointer to ``struct sk_buff`` and -have seven implicit operands. Register R6 is an implicit input that must -contain pointer to sk_buff. Register R0 is an implicit output which contains -the data fetched from the packet. Registers R1-R5 are scratch registers -and must not be used to store the data across BPF_ABS | BPF_LD or -BPF_IND | BPF_LD instructions. - -These instructions have implicit program exit condition as well. When -eBPF program is trying to access the data beyond the packet boundary, -the interpreter will abort the execution of the program. JIT compilers -therefore must preserve this property. src_reg and imm32 fields are -explicit inputs to these instructions. - -For example:: - - BPF_IND | BPF_W | BPF_LD means: - - R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32)) - and R1 - R5 were scratched. - -Unlike classic BPF instruction set, eBPF has generic load/store operations:: - - BPF_MEM | <size> | BPF_STX: *(size *) (dst_reg + off) = src_reg - BPF_MEM | <size> | BPF_ST: *(size *) (dst_reg + off) = imm32 - BPF_MEM | <size> | BPF_LDX: dst_reg = *(size *) (src_reg + off) - -Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. - -It also includes atomic operations, which use the immediate field for extra -encoding:: - - .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg - .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg - -The basic atomic operations supported are:: - - BPF_ADD - BPF_AND - BPF_OR - BPF_XOR - -Each having equivalent semantics with the ``BPF_ADD`` example, that is: the -memory location addresed by ``dst_reg + off`` is atomically modified, with -``src_reg`` as the other operand. 
If the ``BPF_FETCH`` flag is set in the -immediate, then these operations also overwrite ``src_reg`` with the -value that was in memory before it was modified. - -The more special operations are:: - - BPF_XCHG - -This atomically exchanges ``src_reg`` with the value addressed by ``dst_reg + -off``. :: - - BPF_CMPXCHG - -This atomically compares the value addressed by ``dst_reg + off`` with -``R0``. If they match it is replaced with ``src_reg``. In either case, the -value that was there before is zero-extended and loaded back to ``R0``. - -Note that 1 and 2 byte atomic operations are not supported. - -Clang can generate atomic instructions by default when ``-mcpu=v3`` is -enabled. If a lower version for ``-mcpu`` is set, the only atomic instruction -Clang can generate is ``BPF_ADD`` *without* ``BPF_FETCH``. If you need to enable -the atomics features, while keeping a lower ``-mcpu`` version, you can use -``-Xclang -target-feature -Xclang +alu32``. - -You may encounter ``BPF_XADD`` - this is a legacy name for ``BPF_ATOMIC``, -referring to the exclusive-add operation encoded when the immediate field is -zero. - -eBPF has one 16-byte instruction: ``BPF_LD | BPF_DW | BPF_IMM`` which consists -of two consecutive ``struct bpf_insn`` 8-byte blocks and interpreted as single -instruction that loads 64-bit immediate value into a dst_reg. -Classic BPF has similar instruction: ``BPF_LD | BPF_W | BPF_IMM`` which loads -32-bit immediate value into a register. - -eBPF verifier -------------- -The safety of the eBPF program is determined in two steps. - -First step does DAG check to disallow loops and other CFG validation. -In particular it will detect programs that have unreachable instructions. -(though classic BPF checker allows them) - -Second step starts from the first insn and descends all possible paths. -It simulates execution of every insn and observes the state change of -registers and stack. - -At the start of the program the register R1 contains a pointer to context -and has type PTR_TO_CTX. -If verifier sees an insn that does R2=R1, then R2 has now type -PTR_TO_CTX as well and can be used on the right hand side of expression. -If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=SCALAR_VALUE, -since addition of two valid pointers makes invalid pointer. -(In 'secure' mode verifier will reject any type of pointer arithmetic to make -sure that kernel addresses don't leak to unprivileged users) - -If register was never written to, it's not readable:: - - bpf_mov R0 = R2 - bpf_exit - -will be rejected, since R2 is unreadable at the start of the program. - -After kernel function call, R1-R5 are reset to unreadable and -R0 has a return type of the function. - -Since R6-R9 are callee saved, their state is preserved across the call. - -:: - - bpf_mov R6 = 1 - bpf_call foo - bpf_mov R0 = R6 - bpf_exit - -is a correct program. If there was R1 instead of R6, it would have -been rejected. - -load/store instructions are allowed only with registers of valid types, which -are PTR_TO_CTX, PTR_TO_MAP, PTR_TO_STACK. They are bounds and alignment checked. -For example:: - - bpf_mov R1 = 1 - bpf_mov R2 = 2 - bpf_xadd *(u32 *)(R1 + 3) += R2 - bpf_exit - -will be rejected, since R1 doesn't have a valid pointer type at the time of -execution of instruction bpf_xadd. - -At the start R1 type is PTR_TO_CTX (a pointer to generic ``struct bpf_context``) -A callback is used to customize verifier to restrict eBPF program access to only -certain fields within ctx structure with specified size and alignment. 
- -For example, the following insn:: - - bpf_ld R0 = *(u32 *)(R6 + 8) - -intends to load a word from address R6 + 8 and store it into R0 -If R6=PTR_TO_CTX, via is_valid_access() callback the verifier will know -that offset 8 of size 4 bytes can be accessed for reading, otherwise -the verifier will reject the program. -If R6=PTR_TO_STACK, then access should be aligned and be within -stack bounds, which are [-MAX_BPF_STACK, 0). In this example offset is 8, -so it will fail verification, since it's out of bounds. - -The verifier will allow eBPF program to read data from stack only after -it wrote into it. - -Classic BPF verifier does similar check with M[0-15] memory slots. -For example:: - - bpf_ld R0 = *(u32 *)(R10 - 4) - bpf_exit - -is invalid program. -Though R10 is correct read-only register and has type PTR_TO_STACK -and R10 - 4 is within stack bounds, there were no stores into that location. - -Pointer register spill/fill is tracked as well, since four (R6-R9) -callee saved registers may not be enough for some programs. - -Allowed function calls are customized with bpf_verifier_ops->get_func_proto() -The eBPF verifier will check that registers match argument constraints. -After the call register R0 will be set to return type of the function. - -Function calls is a main mechanism to extend functionality of eBPF programs. -Socket filters may let programs to call one set of functions, whereas tracing -filters may allow completely different set. - -If a function made accessible to eBPF program, it needs to be thought through -from safety point of view. The verifier will guarantee that the function is -called with valid arguments. - -seccomp vs socket filters have different security restrictions for classic BPF. -Seccomp solves this by two stage verifier: classic BPF verifier is followed -by seccomp verifier. In case of eBPF one configurable verifier is shared for -all use cases. - -See details of eBPF verifier in kernel/bpf/verifier.c - -Register value tracking ------------------------ -In order to determine the safety of an eBPF program, the verifier must track -the range of possible values in each register and also in each stack slot. -This is done with ``struct bpf_reg_state``, defined in include/linux/ -bpf_verifier.h, which unifies tracking of scalar and pointer values. Each -register state has a type, which is either NOT_INIT (the register has not been -written to), SCALAR_VALUE (some value which is not usable as a pointer), or a -pointer type. The types of pointers describe their base, as follows: - - - PTR_TO_CTX - Pointer to bpf_context. - CONST_PTR_TO_MAP - Pointer to struct bpf_map. "Const" because arithmetic - on these pointers is forbidden. - PTR_TO_MAP_VALUE - Pointer to the value stored in a map element. - PTR_TO_MAP_VALUE_OR_NULL - Either a pointer to a map value, or NULL; map accesses - (see section 'eBPF maps', below) return this type, - which becomes a PTR_TO_MAP_VALUE when checked != NULL. - Arithmetic on these pointers is forbidden. - PTR_TO_STACK - Frame pointer. - PTR_TO_PACKET - skb->data. - PTR_TO_PACKET_END - skb->data + headlen; arithmetic forbidden. - PTR_TO_SOCKET - Pointer to struct bpf_sock_ops, implicitly refcounted. - PTR_TO_SOCKET_OR_NULL - Either a pointer to a socket, or NULL; socket lookup - returns this type, which becomes a PTR_TO_SOCKET when - checked != NULL. PTR_TO_SOCKET is reference-counted, - so programs must release the reference through the - socket release function before the end of the program. - Arithmetic on these pointers is forbidden. 
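In restricted C, the _OR_NULL types above show up as the familiar requirement
to test a helper's return value before using it. A hedged sketch (the map and
key names here are made up)::

	__u64 *value;
	__u32 key = 0;

	value = bpf_map_lookup_elem(&my_map, &key); /* PTR_TO_MAP_VALUE_OR_NULL */
	if (!value)
		return 0;
	/* checked != NULL, so 'value' is now PTR_TO_MAP_VALUE */
	__sync_fetch_and_add(value, 1);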
- -However, a pointer may be offset from this base (as a result of pointer -arithmetic), and this is tracked in two parts: the 'fixed offset' and 'variable -offset'. The former is used when an exactly-known value (e.g. an immediate -operand) is added to a pointer, while the latter is used for values which are -not exactly known. The variable offset is also used in SCALAR_VALUEs, to track -the range of possible values in the register. - -The verifier's knowledge about the variable offset consists of: - -* minimum and maximum values as unsigned -* minimum and maximum values as signed - -* knowledge of the values of individual bits, in the form of a 'tnum': a u64 - 'mask' and a u64 'value'. 1s in the mask represent bits whose value is unknown; - 1s in the value represent bits known to be 1. Bits known to be 0 have 0 in both - mask and value; no bit should ever be 1 in both. For example, if a byte is read - into a register from memory, the register's top 56 bits are known zero, while - the low 8 are unknown - which is represented as the tnum (0x0; 0xff). If we - then OR this with 0x40, we get (0x40; 0xbf), then if we add 1 we get (0x0; - 0x1ff), because of potential carries. - -Besides arithmetic, the register state can also be updated by conditional -branches. For instance, if a SCALAR_VALUE is compared > 8, in the 'true' branch -it will have a umin_value (unsigned minimum value) of 9, whereas in the 'false' -branch it will have a umax_value of 8. A signed compare (with BPF_JSGT or -BPF_JSGE) would instead update the signed minimum/maximum values. Information -from the signed and unsigned bounds can be combined; for instance if a value is -first tested < 8 and then tested s> 4, the verifier will conclude that the value -is also > 4 and s< 8, since the bounds prevent crossing the sign boundary. - -PTR_TO_PACKETs with a variable offset part have an 'id', which is common to all -pointers sharing that same variable offset. This is important for packet range -checks: after adding a variable to a packet pointer register A, if you then copy -it to another register B and then add a constant 4 to A, both registers will -share the same 'id' but the A will have a fixed offset of +4. Then if A is -bounds-checked and found to be less than a PTR_TO_PACKET_END, the register B is -now known to have a safe range of at least 4 bytes. See 'Direct packet access', -below, for more on PTR_TO_PACKET ranges. - -The 'id' field is also used on PTR_TO_MAP_VALUE_OR_NULL, common to all copies of -the pointer returned from a map lookup. This means that when one copy is -checked and found to be non-NULL, all copies can become PTR_TO_MAP_VALUEs. -As well as range-checking, the tracked information is also used for enforcing -alignment of pointer accesses. For instance, on most systems the packet pointer -is 2 bytes after a 4-byte alignment. If a program adds 14 bytes to that to jump -over the Ethernet header, then reads IHL and addes (IHL * 4), the resulting -pointer will have a variable offset known to be 4n+2 for some n, so adding the 2 -bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through -that pointer are safe. -The 'id' field is also used on PTR_TO_SOCKET and PTR_TO_SOCKET_OR_NULL, common -to all copies of the pointer returned from a socket lookup. This has similar -behaviour to the handling for PTR_TO_MAP_VALUE_OR_NULL->PTR_TO_MAP_VALUE, but -it also handles reference tracking for the pointer. PTR_TO_SOCKET implicitly -represents a reference to the corresponding ``struct sock``. 
To ensure that the -reference is not leaked, it is imperative to NULL-check the reference and in -the non-NULL case, and pass the valid reference to the socket release function. - -Direct packet access --------------------- -In cls_bpf and act_bpf programs the verifier allows direct access to the packet -data via skb->data and skb->data_end pointers. -Ex:: - - 1: r4 = *(u32 *)(r1 +80) /* load skb->data_end */ - 2: r3 = *(u32 *)(r1 +76) /* load skb->data */ - 3: r5 = r3 - 4: r5 += 14 - 5: if r5 > r4 goto pc+16 - R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp - 6: r0 = *(u16 *)(r3 +12) /* access 12 and 13 bytes of the packet */ - -this 2byte load from the packet is safe to do, since the program author -did check ``if (skb->data + 14 > skb->data_end) goto err`` at insn #5 which -means that in the fall-through case the register R3 (which points to skb->data) -has at least 14 directly accessible bytes. The verifier marks it -as R3=pkt(id=0,off=0,r=14). -id=0 means that no additional variables were added to the register. -off=0 means that no additional constants were added. -r=14 is the range of safe access which means that bytes [R3, R3 + 14) are ok. -Note that R5 is marked as R5=pkt(id=0,off=14,r=14). It also points -to the packet data, but constant 14 was added to the register, so -it now points to ``skb->data + 14`` and accessible range is [R5, R5 + 14 - 14) -which is zero bytes. - -More complex packet access may look like:: - - - R0=inv1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp - 6: r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */ - 7: r4 = *(u8 *)(r3 +12) - 8: r4 *= 14 - 9: r3 = *(u32 *)(r1 +76) /* load skb->data */ - 10: r3 += r4 - 11: r2 = r1 - 12: r2 <<= 48 - 13: r2 >>= 48 - 14: r3 += r2 - 15: r2 = r3 - 16: r2 += 8 - 17: r1 = *(u32 *)(r1 +80) /* load skb->data_end */ - 18: if r2 > r1 goto pc+2 - R0=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)) R5=pkt(id=0,off=14,r=14) R10=fp - 19: r1 = *(u8 *)(r3 +4) - -The state of the register R3 is R3=pkt(id=2,off=0,r=8) -id=2 means that two ``r3 += rX`` instructions were seen, so r3 points to some -offset within a packet and since the program author did -``if (r3 + 8 > r1) goto err`` at insn #18, the safe range is [R3, R3 + 8). -The verifier only allows 'add'/'sub' operations on packet registers. Any other -operation will set the register state to 'SCALAR_VALUE' and it won't be -available for direct packet access. - -Operation ``r3 += rX`` may overflow and become less than original skb->data, -therefore the verifier has to prevent that. So when it sees ``r3 += rX`` -instruction and rX is more than 16-bit value, any subsequent bounds-check of r3 -against skb->data_end will not give us 'range' information, so attempts to read -through the pointer will give "invalid access to packet" error. - -Ex. after insn ``r4 = *(u8 *)(r3 +12)`` (insn #7 above) the state of r4 is -R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) which means that upper 56 bits -of the register are guaranteed to be zero, and nothing is known about the lower -8 bits. After insn ``r4 *= 14`` the state becomes -R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)), since multiplying an 8-bit -value by constant 14 will keep upper 52 bits as zero, also the least significant -bit will be zero as 14 is even. 
Similarly ``r2 >>= 48`` will make -R2=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)), since the shift is not sign -extending. This logic is implemented in adjust_reg_min_max_vals() function, -which calls adjust_ptr_min_max_vals() for adding pointer to scalar (or vice -versa) and adjust_scalar_min_max_vals() for operations on two scalars. - -The end result is that bpf program author can access packet directly -using normal C code as:: - - void *data = (void *)(long)skb->data; - void *data_end = (void *)(long)skb->data_end; - struct eth_hdr *eth = data; - struct iphdr *iph = data + sizeof(*eth); - struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph); - - if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end) - return 0; - if (eth->h_proto != htons(ETH_P_IP)) - return 0; - if (iph->protocol != IPPROTO_UDP || iph->ihl != 5) - return 0; - if (udp->dest == 53 || udp->source == 9) - ...; - -which makes such programs easier to write comparing to LD_ABS insn -and significantly faster. - -eBPF maps ---------- -'maps' is a generic storage of different types for sharing data between kernel -and userspace. - -The maps are accessed from user space via BPF syscall, which has commands: - -- create a map with given type and attributes - ``map_fd = bpf(BPF_MAP_CREATE, union bpf_attr *attr, u32 size)`` - using attr->map_type, attr->key_size, attr->value_size, attr->max_entries - returns process-local file descriptor or negative error - -- lookup key in a given map - ``err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size)`` - using attr->map_fd, attr->key, attr->value - returns zero and stores found elem into value or negative error - -- create or update key/value pair in a given map - ``err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size)`` - using attr->map_fd, attr->key, attr->value - returns zero or negative error - -- find and delete element by key in a given map - ``err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size)`` - using attr->map_fd, attr->key - -- to delete map: close(fd) - Exiting process will delete maps automatically - -userspace programs use this syscall to create/access maps that eBPF programs -are concurrently updating. - -maps can have different types: hash, array, bloom filter, radix-tree, etc. - -The map is defined by: - - - type - - max number of elements - - key size in bytes - - value size in bytes - -Pruning -------- -The verifier does not actually walk all possible paths through the program. For -each new branch to analyse, the verifier looks at all the states it's previously -been in when at this instruction. If any of them contain the current state as a -subset, the branch is 'pruned' - that is, the fact that the previous state was -accepted implies the current state would be as well. For instance, if in the -previous state, r1 held a packet-pointer, and in the current state, r1 holds a -packet-pointer with a range as long or longer and at least as strict an -alignment, then r1 is safe. Similarly, if r2 was NOT_INIT before then it can't -have been used by any path from that point, so any value in r2 (including -another NOT_INIT) is safe. The implementation is in the function regsafe(). -Pruning considers not only the registers but also the stack (and any spilled -registers it may hold). They must all be safe for the branch to be pruned. -This is implemented in states_equal(). 
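Before moving on to the verifier log examples, here is how the map syscall
commands described above can look from user space. This is a hedged sketch
only: error handling is minimal and a raw syscall(2) invocation stands in for
a library wrapper::

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int create_and_update(void)
	{
		union bpf_attr attr;
		__u32 key = 0;
		__u64 value = 42;
		int map_fd;

		/* BPF_MAP_CREATE: type, key/value sizes and max_entries */
		memset(&attr, 0, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = sizeof(__u32);
		attr.value_size = sizeof(__u64);
		attr.max_entries = 16;
		map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		if (map_fd < 0)
			return -1;

		/* BPF_MAP_UPDATE_ELEM: map_fd plus pointers to key and value */
		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;
		attr.key = (__u64)(unsigned long)&key;
		attr.value = (__u64)(unsigned long)&value;
		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}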
- -Understanding eBPF verifier messages ------------------------------------- - -The following are few examples of invalid eBPF programs and verifier error -messages as seen in the log: - -Program with unreachable instructions:: - - static struct bpf_insn prog[] = { - BPF_EXIT_INSN(), - BPF_EXIT_INSN(), - }; - -Error: - - unreachable insn 1 - -Program that reads uninitialized register:: - - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - -Error:: - - 0: (bf) r0 = r2 - R2 !read_ok - -Program that doesn't initialize R0 before exiting:: - - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - -Error:: - - 0: (bf) r2 = r1 - 1: (95) exit - R0 !read_ok - -Program that accesses stack out of bounds:: - - BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), - BPF_EXIT_INSN(), - -Error:: - - 0: (7a) *(u64 *)(r10 +8) = 0 - invalid stack off=8 size=8 - -Program that doesn't initialize stack before passing its address into function:: - - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - -Error:: - - 0: (bf) r2 = r10 - 1: (07) r2 += -8 - 2: (b7) r1 = 0x0 - 3: (85) call 1 - invalid indirect read from stack off -8+0 size 8 - -Program that uses invalid map_fd=0 while calling to map_lookup_elem() function:: - - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - -Error:: - - 0: (7a) *(u64 *)(r10 -8) = 0 - 1: (bf) r2 = r10 - 2: (07) r2 += -8 - 3: (b7) r1 = 0x0 - 4: (85) call 1 - fd 0 is not pointing to valid bpf_map - -Program that doesn't check return value of map_lookup_elem() before accessing -map element:: - - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - -Error:: - - 0: (7a) *(u64 *)(r10 -8) = 0 - 1: (bf) r2 = r10 - 2: (07) r2 += -8 - 3: (b7) r1 = 0x0 - 4: (85) call 1 - 5: (7a) *(u64 *)(r0 +0) = 0 - R0 invalid mem access 'map_value_or_null' - -Program that correctly checks map_lookup_elem() returned value for NULL, but -accesses the memory with incorrect alignment:: - - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), - BPF_EXIT_INSN(), - -Error:: - - 0: (7a) *(u64 *)(r10 -8) = 0 - 1: (bf) r2 = r10 - 2: (07) r2 += -8 - 3: (b7) r1 = 1 - 4: (85) call 1 - 5: (15) if r0 == 0x0 goto pc+1 - R0=map_ptr R10=fp - 6: (7a) *(u64 *)(r0 +4) = 0 - misaligned access off 4 size 8 - -Program that correctly checks map_lookup_elem() returned value for NULL and -accesses memory with correct alignment in one side of 'if' branch, but fails -to do so in the other side of 'if' branch:: - - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - 
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - -Error:: - - 0: (7a) *(u64 *)(r10 -8) = 0 - 1: (bf) r2 = r10 - 2: (07) r2 += -8 - 3: (b7) r1 = 1 - 4: (85) call 1 - 5: (15) if r0 == 0x0 goto pc+2 - R0=map_ptr R10=fp - 6: (7a) *(u64 *)(r0 +0) = 0 - 7: (95) exit - - from 5 to 8: R0=imm0 R10=fp - 8: (7a) *(u64 *)(r0 +0) = 1 - R0 invalid mem access 'imm' - -Program that performs a socket lookup then sets the pointer to NULL without -checking it:: - - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - -Error:: - - 0: (b7) r2 = 0 - 1: (63) *(u32 *)(r10 -8) = r2 - 2: (bf) r2 = r10 - 3: (07) r2 += -8 - 4: (b7) r3 = 4 - 5: (b7) r4 = 0 - 6: (b7) r5 = 0 - 7: (85) call bpf_sk_lookup_tcp#65 - 8: (b7) r0 = 0 - 9: (95) exit - Unreleased reference id=1, alloc_insn=7 - -Program that performs a socket lookup but does not NULL-check the returned -value:: - - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), - BPF_EXIT_INSN(), - -Error:: - - 0: (b7) r2 = 0 - 1: (63) *(u32 *)(r10 -8) = r2 - 2: (bf) r2 = r10 - 3: (07) r2 += -8 - 4: (b7) r3 = 4 - 5: (b7) r4 = 0 - 6: (b7) r5 = 0 - 7: (85) call bpf_sk_lookup_tcp#65 - 8: (95) exit - Unreleased reference id=1, alloc_insn=7 - Testing ------- Next to the BPF toolchain, the kernel also ships a test module that contains -various test cases for classic and internal BPF that can be executed against +various test cases for classic and eBPF that can be executed against the BPF interpreter and JIT compiler. It can be found in lib/test_bpf.c and enabled via Kconfig:: diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index b398b8576417..cf908d79666e 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -35,6 +35,7 @@ GNU make 3.81 make --version binutils 2.23 ld -v flex 2.5.35 flex --version bison 2.0 bison --version +pahole 1.16 pahole --version util-linux 2.10o fdformat --version kmod 13 depmod -V e2fsprogs 1.41.4 e2fsck -V @@ -108,6 +109,16 @@ Bison Since Linux 4.16, the build system generates parsers during build. This requires bison 2.0 or later. +pahole: +------- + +Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system +generates BTF (BPF Type Format) from DWARF in vmlinux, a bit later from kernel +modules as well. This requires pahole v1.16 or later. + +It is found in the 'dwarves' or 'pahole' distro packages or from +https://fedorapeople.org/~acme/dwarves/. + Perl ---- diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index da085d63af9b..6b3aaed66fba 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read Documentation/process/submit-checklist.rst for a list of items to check before submitting code. 
If you are submitting a driver, also read Documentation/process/submitting-drivers.rst; for device -tree binding patches, read Documentation/process/submitting-patches.rst. +tree binding patches, read +Documentation/devicetree/bindings/submitting-patches.rst. This documentation assumes that you're using ``git`` to prepare your patches. If you're unfamiliar with ``git``, you would be well-advised to learn how to diff --git a/MAINTAINERS b/MAINTAINERS index da2fb287c4b3..19d6fccf1205 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3569,7 +3569,7 @@ R: Florent Revest <revest@chromium.org> R: Brendan Jackman <jackmanb@chromium.org> L: bpf@vger.kernel.org S: Maintained -F: Documentation/bpf/bpf_lsm.rst +F: Documentation/bpf/prog_lsm.rst F: include/linux/bpf_lsm.h F: kernel/bpf/bpf_lsm.c F: security/bpf/ @@ -12180,8 +12180,8 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h MELLANOX ETHERNET SWITCH DRIVERS -M: Jiri Pirko <jiri@nvidia.com> M: Ido Schimmel <idosch@nvidia.com> +M: Petr Machata <petrm@nvidia.com> L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -15994,6 +15994,7 @@ F: arch/mips/generic/board-ranchu.c RANDOM NUMBER DRIVER M: "Theodore Ts'o" <tytso@mit.edu> +M: Jason A. Donenfeld <Jason@zx2c4.com> S: Maintained F: drivers/char/random.c @@ -16516,6 +16517,12 @@ T: git git://linuxtv.org/media_tree.git F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml F: drivers/media/platform/sunxi/sun8i-rotate/ +RPMSG TTY DRIVER +M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com> +L: linux-remoteproc@vger.kernel.org +S: Maintained +F: drivers/tty/rpmsg_tty.c + RTL2830 MEDIA DRIVER M: Antti Palosaari <crope@iki.fi> L: linux-media@vger.kernel.org @@ -16637,8 +16644,8 @@ W: http://www.ibm.com/developerworks/linux/linux390/ F: drivers/iommu/s390-iommu.c S390 IUCV NETWORK LAYER -M: Julian Wiedmann <jwi@linux.ibm.com> -M: Karsten Graul <kgraul@linux.ibm.com> +M: Alexandra Winter <wintera@linux.ibm.com> +M: Wenjia Zhang <wenjia@linux.ibm.com> L: linux-s390@vger.kernel.org L: netdev@vger.kernel.org S: Supported @@ -16648,8 +16655,8 @@ F: include/net/iucv/ F: net/iucv/ S390 NETWORK DRIVERS -M: Julian Wiedmann <jwi@linux.ibm.com> -M: Karsten Graul <kgraul@linux.ibm.com> +M: Alexandra Winter <wintera@linux.ibm.com> +M: Wenjia Zhang <wenjia@linux.ibm.com> L: linux-s390@vger.kernel.org L: netdev@vger.kernel.org S: Supported @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 16 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc4 NAME = Gobble Gobble # *DOCUMENTATION* @@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror -KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH) +KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%) ifdef CONFIG_CC_IS_CLANG KBUILD_CPPFLAGS += -Qunused-arguments diff --git a/arch/Kconfig b/arch/Kconfig index 26b8ed11639d..d3c4ab249e9c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES and vice-versa 32-bit applications to call 64-bit mmap(). Required for applications doing different bitness syscalls. 
+config PAGE_SIZE_LESS_THAN_64KB + def_bool y + depends on !ARM64_64K_PAGES + depends on !IA64_PAGE_SIZE_64KB + depends on !PAGE_SIZE_64KB + depends on !PARISC_PAGE_SIZE_64KB + depends on !PPC_64K_PAGES + depends on !PPC_256K_PAGES + depends on !PAGE_SIZE_256KB + # This allows to use a set of generic functions to determine mmap base # address by giving priority to top-down scheme only if the process # is not in legacy mode (compat task, unlimited stack size or diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi index 44526ad9d210..eee2f63b9bba 100644 --- a/arch/arm/boot/dts/qcom-sdx55.dtsi +++ b/arch/arm/boot/dts/qcom-sdx55.dtsi @@ -333,12 +333,10 @@ clocks = <&rpmhcc RPMH_IPA_CLK>; clock-names = "core"; - interconnects = <&system_noc MASTER_IPA &system_noc SLAVE_SNOC_MEM_NOC_GC>, - <&mem_noc MASTER_SNOC_GC_MEM_NOC &mc_virt SLAVE_EBI_CH0>, + interconnects = <&system_noc MASTER_IPA &mc_virt SLAVE_EBI_CH0>, <&system_noc MASTER_IPA &system_noc SLAVE_OCIMEM>, <&mem_noc MASTER_AMPSS_M0 &system_noc SLAVE_IPA_CFG>; - interconnect-names = "memory-a", - "memory-b", + interconnect-names = "memory", "imem", "config"; diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi index 1d87fc0c24ee..c99c92f008a0 100644 --- a/arch/arm/boot/dts/sun8i-r40.dtsi +++ b/arch/arm/boot/dts/sun8i-r40.dtsi @@ -511,6 +511,16 @@ #interrupt-cells = <3>; #gpio-cells = <3>; + can_ph_pins: can-ph-pins { + pins = "PH20", "PH21"; + function = "can"; + }; + + can_pa_pins: can-pa-pins { + pins = "PA16", "PA17"; + function = "can"; + }; + clk_out_a_pin: clk-out-a-pin { pins = "PI12"; function = "clk_out_a"; @@ -926,6 +936,15 @@ #size-cells = <0>; }; + can0: can@1c2bc00 { + compatible = "allwinner,sun8i-r40-can"; + reg = <0x01c2bc00 0x400>; + interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&ccu CLK_BUS_CAN>; + resets = <&ccu RST_BUS_CAN>; + status = "disabled"; + }; + i2c4: i2c@1c2c000 { compatible = "allwinner,sun6i-a31-i2c"; reg = <0x01c2c000 0x400>; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index eeb6dc0ecf46..10ceebb7530b 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -163,7 +163,7 @@ static const s8 bpf2a32[][2] = { [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)}, /* Read only Frame Pointer to access Stack */ [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)}, - /* Temporary Register for internal BPF JIT, can be used + /* Temporary Register for BPF JIT, can be used * for constant blindings and others. 
*/ [TMP_REG_1] = {ARM_R7, ARM_R6}, @@ -1199,7 +1199,8 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) /* tmp2[0] = array, tmp2[1] = index */ - /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + /* + * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) * goto out; * tail_call_cnt++; */ @@ -1208,7 +1209,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) tc = arm_bpf_get_reg64(tcc, tmp, ctx); emit(ARM_CMP_I(tc[0], hi), ctx); _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx); - _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); + _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx); emit(ARM_ADC_I(tc[0], tc[0], 0), ctx); arm_bpf_put_reg64(tcc, tmp, ctx); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index a39fcf318c77..01d47c5886dc 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -91,7 +91,7 @@ #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* TCR_EL2 Registers bits */ -#define TCR_EL2_RES1 ((1 << 31) | (1 << 23)) +#define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) #define TCR_EL2_TBI (1 << 20) #define TCR_EL2_PS_SHIFT 16 #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT) @@ -276,7 +276,7 @@ #define CPTR_EL2_TFP_SHIFT 10 /* Hyp Coprocessor Trap Register */ -#define CPTR_EL2_TCPAC (1 << 31) +#define CPTR_EL2_TCPAC (1U << 31) #define CPTR_EL2_TAM (1 << 30) #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index b3e4f9a088b1..8cf970d219f5 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -77,11 +77,17 @@ .endm SYM_CODE_START(ftrace_regs_caller) +#ifdef BTI_C + BTI_C +#endif ftrace_regs_entry 1 b ftrace_common SYM_CODE_END(ftrace_regs_caller) SYM_CODE_START(ftrace_caller) +#ifdef BTI_C + BTI_C +#endif ftrace_regs_entry 0 b ftrace_common SYM_CODE_END(ftrace_caller) diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 1038494135c8..6fb31c117ebe 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage) if (rc) return rc; kimage->arch.ttbr1 = __pa(trans_pgd); - kimage->arch.zero_page = __pa(empty_zero_page); + kimage->arch.zero_page = __pa_symbol(empty_zero_page); reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start; memcpy(reloc_code, __relocate_new_kernel_start, reloc_size); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 7a0af1d39303..96c5f3fb7838 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu); +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code); + /* * Allow the hypervisor to handle the exit with an exit handler if it has one. * @@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code) */ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { + /* + * Save PSTATE early so that we can evaluate the vcpu mode + * early on. + */ + vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR); + + /* + * Check whether we want to repaint the state one way or + * another. 
+ */ + early_exit_filter(vcpu, exit_code); + if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR); diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index de7e14c862e6..7ecca8b07851 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) { ctxt->regs.pc = read_sysreg_el2(SYS_ELR); - ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR); + /* + * Guest PSTATE gets saved at guest fixup time in all + * cases. We still need to handle the nVHE host side here. + */ + if (!has_vhe() && ctxt->__hyp_running_vcpu) + ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR); if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2); diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c0e3fed26d93..d13115a12434 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) * Returns false if the guest ran in AArch32 when it shouldn't have, and * thus should exit to the host, or true if a the guest run loop can continue. */ -static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code) +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) { struct kvm *kvm = kern_hyp_va(vcpu->kvm); @@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code) vcpu->arch.target = -1; *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT); *exit_code |= ARM_EXCEPTION_IL; - return false; } - - return true; } /* Switch to the guest for legacy non-VHE systems */ @@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* Jump in the fire! */ exit_code = __guest_enter(vcpu); - if (unlikely(!handle_aarch32_guest(vcpu, &exit_code))) - break; - /* And we're baaack! 
*/ } while (fixup_guest_exit(vcpu, &exit_code)); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 5a2cb5d9bc4b..fbb26b93c347 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) return hyp_exit_handlers; } +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) +{ +} + /* Switch to the guest for VHE systems running in EL2 */ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 86c9dc0681cc..07aad85848fa 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -44,7 +44,7 @@ static const int bpf2a64[] = { [BPF_REG_9] = A64_R(22), /* read-only frame pointer to access stack */ [BPF_REG_FP] = A64_R(25), - /* temporary registers for internal BPF JIT */ + /* temporary registers for BPF JIT */ [TMP_REG_1] = A64_R(10), [TMP_REG_2] = A64_R(11), [TMP_REG_3] = A64_R(12), @@ -287,13 +287,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_CMP(0, r3, tmp), ctx); emit(A64_B_(A64_COND_CS, jmp_offset), ctx); - /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + /* + * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) * goto out; * tail_call_cnt++; */ emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); emit(A64_CMP(1, tcc, tmp), ctx); - emit(A64_B_(A64_COND_HI, jmp_offset), ctx); + emit(A64_B_(A64_COND_CS, jmp_offset), ctx); emit(A64_ADD_I(1, tcc, tcc, 1), ctx); /* prog = array->ptrs[index]; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index de60ad190057..0215dc1529e9 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT config PGTABLE_LEVELS int default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48 - default 3 if 64BIT && !PAGE_SIZE_64KB + default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48) default 2 config MIPS_AUTO_PFN_OFFSET diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 2861a05c2e0c..f27cf31b4140 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -52,7 +52,7 @@ endif vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o -vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o +vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o targets := $(notdir $(vmlinuzobjs-y)) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index ac0e2cfc6d57..24a529c6c4be 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c) static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { - decode_configs(c); - /* All Loongson processors covered here define ExcCode 16 as GSExc. 
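
The tail-call comparison change in the ARM and arm64 JIT hunks above
recurs in the MIPS, PowerPC, RISC-V, s390 and SPARC JITs later in this
series. A hedged sketch of the shared semantics the JITs are being
aligned to (mirroring the updated comment in those hunks, not any one
JIT's emitted code)::

  /* A program may perform at most MAX_TAIL_CALL_CNT tail calls, so
   * bail out once the pre-increment count has reached the limit,
   * i.e. compare with '>=' (unsigned CS/HS) instead of '>' (HI). */
  if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
          goto out;
  tail_call_cnt++;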
*/ c->options |= MIPS_CPU_GSEXCEX; @@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) panic("Unknown Loongson Processor ID!"); break; } + + decode_configs(c); } #else static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { } diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 376a6e2676e9..9f47a889b047 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_puts(m, " tx39_cache"); if (cpu_has_octeon_cache) seq_puts(m, " octeon_cache"); - if (cpu_has_fpu) + if (raw_cpu_has_fpu) seq_puts(m, " fpu"); if (cpu_has_32fpr) seq_puts(m, " 32fpr"); diff --git a/arch/mips/net/bpf_jit_comp.h b/arch/mips/net/bpf_jit_comp.h index 6f3a7b07294b..a37fe20818eb 100644 --- a/arch/mips/net/bpf_jit_comp.h +++ b/arch/mips/net/bpf_jit_comp.h @@ -98,7 +98,7 @@ do { \ #define emit(...) __emit(__VA_ARGS__) /* Workaround for R10000 ll/sc errata */ -#ifdef CONFIG_WAR_R10000 +#ifdef CONFIG_WAR_R10000_LLSC #define LLSC_beqz beqzl #else #define LLSC_beqz beqz diff --git a/arch/mips/net/bpf_jit_comp32.c b/arch/mips/net/bpf_jit_comp32.c index bd996ede12f8..044b11b65bca 100644 --- a/arch/mips/net/bpf_jit_comp32.c +++ b/arch/mips/net/bpf_jit_comp32.c @@ -1381,8 +1381,7 @@ void build_prologue(struct jit_context *ctx) * 16-byte area in the parent's stack frame. On a tail call, the * calling function jumps into the prologue after these instructions. */ - emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO, - min(MAX_TAIL_CALL_CNT + 1, 0xffff)); + emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT, 0xffff)); emit(ctx, sw, MIPS_R_T9, 0, MIPS_R_SP); /* diff --git a/arch/mips/net/bpf_jit_comp64.c b/arch/mips/net/bpf_jit_comp64.c index 815ade724227..6475828ffb36 100644 --- a/arch/mips/net/bpf_jit_comp64.c +++ b/arch/mips/net/bpf_jit_comp64.c @@ -552,7 +552,7 @@ void build_prologue(struct jit_context *ctx) * On a tail call, the calling function jumps into the prologue * after this instruction. */ - emit(ctx, addiu, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT + 1, 0xffff)); + emit(ctx, ori, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT, 0xffff)); /* === Entry-point for tail calls === */ diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 8db4af4879d0..82d77f4b0d08 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -15,7 +15,12 @@ # Mike Shaver, Helge Deller and Martin K. 
Petersen
#
+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
KBUILD_IMAGE := vmlinuz
+endif
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index d2daeac2b217..1b8fd80cbe7f 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -1,7 +1,9 @@
CONFIG_LOCALVERSION="-64bit"
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZ4=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
+CONFIG_MEMORY_FAILURE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ISCSI_BOOT_SYSFS=y
CONFIG_SCSI_MPT2SAS=y
-CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_LASI700=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_ZALON=y
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_DH=y
CONFIG_ATA=y
+CONFIG_SATA_SIL=y
+CONFIG_SATA_SIS=y
+CONFIG_SATA_VIA=y
CONFIG_PATA_NS87415=y
CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y
@@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_RAID=m
CONFIG_DM_UEVENT=y
+CONFIG_DM_AUDIT=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_SAS=y
@@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
CONFIG_FB_MATROX_I2C=y
CONFIG_FB_MATROX_MAVEN=y
CONFIG_FB_RADEON=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_AEC=m
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
index 056d588befdd..70d3cffb0251 100644
--- a/arch/parisc/install.sh
+++ b/arch/parisc/install.sh
@@ -39,6 +39,7 @@ verify "$3"
if [ -n "${INSTALLKERNEL}" ]; then
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+ if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
fi
# Default install
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9fb1e794831b..061119a56fbe 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -249,30 +249,16 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
- * The cr16 interval timers are not syncronized across CPUs on
- * different sockets, so mark them unstable and lower rating on
- * multi-socket SMP systems.
+ * The cr16 interval timers are not synchronized across CPUs, even if
+ * they share the same socket.
*/ if (num_online_cpus() > 1 && !running_on_qemu) { - int cpu; - unsigned long cpu0_loc; - cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; - - for_each_online_cpu(cpu) { - if (cpu == 0) - continue; - if ((cpu0_loc != 0) && - (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)) - continue; - - /* mark sched_clock unstable */ - clear_sched_clock_stable(); - - clocksource_cr16.name = "cr16_unstable"; - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; - clocksource_cr16.rating = 0; - break; - } + /* mark sched_clock unstable */ + clear_sched_clock_stable(); + + clocksource_cr16.name = "cr16_unstable"; + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; + clocksource_cr16.rating = 0; } /* register at clocksource framework */ diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index 6b1ec9e3541b..349c4a820231 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -202,11 +202,11 @@ vmap_stack_overflow: mfspr r1, SPRN_SPRG_THREAD lwz r1, TASK_CPU - THREAD(r1) slwi r1, r1, 3 - addis r1, r1, emergency_ctx@ha + addis r1, r1, emergency_ctx-PAGE_OFFSET@ha #else - lis r1, emergency_ctx@ha + lis r1, emergency_ctx-PAGE_OFFSET@ha #endif - lwz r1, emergency_ctx@l(r1) + lwz r1, emergency_ctx-PAGE_OFFSET@l(r1) addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE EXCEPTION_PROLOG_2 0 vmap_stack_overflow prepare_transfer_to_handler diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index fcf4760a3a0e..70b7a8f97153 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm) "r" (0) : "memory"); } asm volatile("ptesync": : :"memory"); + // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory"); } else { for (set = 0; set < kvm->arch.tlb_sets; ++set) { @@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm) rb += PPC_BIT(51); /* increment set number */ } asm volatile("ptesync": : :"memory"); - asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory"); + // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. 
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) + asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory"); } } diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index 0da31d41d413..8a4faa05f9e4 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -221,13 +221,13 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o PPC_BCC(COND_GE, out); /* - * if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) * goto out; */ EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT)); /* tail_call_cnt++; */ EMIT(PPC_RAW_ADDIC(_R0, _R0, 1)); - PPC_BCC(COND_GT, out); + PPC_BCC(COND_GE, out); /* prog = array->ptrs[index]; */ EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29)); diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 8b5157ccfeba..8571aafcc9e1 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -228,12 +228,12 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o PPC_BCC(COND_GE, out); /* - * if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) * goto out; */ PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT)); - PPC_BCC(COND_GT, out); + PPC_BCC(COND_GE, out); /* * tail_call_cnt++; diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 25ba21f98504..2639b9ee48f9 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -12,14 +12,12 @@ #include <linux/types.h> #include <linux/kvm.h> #include <linux/kvm_types.h> +#include <asm/csr.h> #include <asm/kvm_vcpu_fp.h> #include <asm/kvm_vcpu_timer.h> -#ifdef CONFIG_64BIT -#define KVM_MAX_VCPUS (1U << 16) -#else -#define KVM_MAX_VCPUS (1U << 9) -#endif +#define KVM_MAX_VCPUS \ + ((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1) #define KVM_HALT_POLL_NS_DEFAULT 500000 diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index d81bae8eb55e..fc058ff5f4b6 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { + gpa_t gpa = slot->base_gfn << PAGE_SHIFT; + phys_addr_t size = slot->npages << PAGE_SHIFT; + + spin_lock(&kvm->mmu_lock); + stage2_unmap_range(kvm, gpa, size, false); + spin_unlock(&kvm->mmu_lock); } void kvm_arch_commit_memory_region(struct kvm *kvm, diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c index e6497424cbf6..529a83b85c1c 100644 --- a/arch/riscv/net/bpf_jit_comp32.c +++ b/arch/riscv/net/bpf_jit_comp32.c @@ -799,11 +799,10 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx); /* - * temp_tcc = tcc - 1; - * if (tcc < 0) + * if (--tcc < 0) * goto out; */ - emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx); + emit(rv_addi(RV_REG_TCC, RV_REG_TCC, -1), ctx); off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx); @@ -829,7 +828,6 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) if (is_12b_check(off, insn)) return -1; emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx); - emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx); /* Epilogue jumps to *(t0 + 4). 
*/ __build_epilogue(true, ctx); return 0; diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index f2a779c7e225..603630b6f3c5 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -327,12 +327,12 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx); - /* if (TCC-- < 0) + /* if (--TCC < 0) * goto out; */ - emit_addi(RV_REG_T1, tcc, -1, ctx); + emit_addi(RV_REG_TCC, tcc, -1, ctx); off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); - emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx); + emit_branch(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx); /* prog = array->ptrs[index]; * if (!prog) @@ -352,7 +352,6 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) if (is_12b_check(off, insn)) return -1; emit_ld(RV_REG_T3, off, RV_REG_T2, ctx); - emit_mv(RV_REG_TCC, RV_REG_T1, ctx); __build_epilogue(true, ctx); return 0; } diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index fd825097cf04..b626bc6e0eaf 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y @@ -476,6 +475,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_VXLAN=m CONFIG_BAREUDP=m +CONFIG_AMT=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m @@ -489,6 +489,7 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ASIX is not set # CONFIG_NET_VENDOR_ATHEROS is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_BROCADE is not set @@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m CONFIG_DIAG288_WATCHDOG=m +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y @@ -775,12 +777,14 @@ CONFIG_CRC4=m CONFIG_CRC7=m CONFIG_CRC8=m CONFIG_RANDOM32_SELFTEST=y +CONFIG_XZ_DEC_MICROLZMA=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_BTF=y CONFIG_GDB_SCRIPTS=y CONFIG_HEADERS_INSTALL=y CONFIG_DEBUG_SECTION_MISMATCH=y @@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_KFENCE=y +CONFIG_KFENCE_STATIC_KEYS=y CONFIG_DEBUG_SHIRQ=y CONFIG_PANIC_ON_OOPS=y CONFIG_DETECT_HUNG_TASK=y @@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m +CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m CONFIG_DEBUG_ENTRY=y CONFIG_CIO_INJECT=y CONFIG_KUNIT=m @@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_LKDTM=m CONFIG_TEST_MIN_HEAP=y -CONFIG_KPROBES_SANITY_TEST=y +CONFIG_KPROBES_SANITY_TEST=m CONFIG_RBTREE_TEST=y CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index c9c3cedff2d8..0056cab27372 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y @@ -467,6 +466,7 @@ 
CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_VXLAN=m CONFIG_BAREUDP=m +CONFIG_AMT=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m @@ -480,6 +480,7 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ASIX is not set # CONFIG_NET_VENDOR_ATHEROS is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_BROCADE is not set @@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m CONFIG_CRC4=m CONFIG_CRC7=m CONFIG_CRC8=m +CONFIG_XZ_DEC_MICROLZMA=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_BTF=y CONFIG_GDB_SCRIPTS=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y @@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m +CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m CONFIG_KUNIT=m CONFIG_KUNIT_DEBUGFS=y CONFIG_LKDTM=m +CONFIG_KPROBES_SANITY_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index aceccf3b9a88..eed3b9acfa71 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -65,9 +65,11 @@ CONFIG_ZFCP=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_LSM="yama,loadpin,safesetid,integrity" # CONFIG_ZLIB_DFLTCC is not set +CONFIG_XZ_DEC_MICROLZMA=y CONFIG_PRINTK_TIME=y # CONFIG_SYMBOLIC_ERRNAME is not set CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_PANIC_ON_OOPS=y diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index e4dc64cc9c55..287bb88f7698 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -14,12 +14,13 @@ /* I/O Map */ #define ZPCI_IOMAP_SHIFT 48 -#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL +#define ZPCI_IOMAP_ADDR_SHIFT 62 +#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT) #define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1) #define ZPCI_IOMAP_MAX_ENTRIES \ - ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT)) + (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT)) #define ZPCI_IOMAP_ADDR_IDX_MASK \ - (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE) + ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK) struct zpci_iomap_entry { u32 fh; diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c index cfc5f5557c06..bc7973359ae2 100644 --- a/arch/s390/lib/test_unwind.c +++ b/arch/s390/lib/test_unwind.c @@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u) } /* - * trigger specification exception + * Trigger operation exception; use insn notation to bypass + * llvm's integrated assembler sanity checks. 
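
Working through the reworked ZPCI_IOMAP macros above (a worked example
derived from the values in that hunk, not text from the patch)::

  ZPCI_IOMAP_ADDR_BASE     = 1UL << 62        = 0x4000000000000000
  ZPCI_IOMAP_MAX_ENTRIES   = 1UL << (62 - 48) = 16384
  ZPCI_IOMAP_ADDR_OFF_MASK = (1UL << 48) - 1  = 0x0000ffffffffffff
  ZPCI_IOMAP_ADDR_IDX_MASK = 0x3fff000000000000

so bit 62 marks an iomap address, bits 61..48 select one of 16384
entries, and bits 47..0 hold the offset within the mapping.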
*/ asm volatile( - " mvcl %%r1,%%r1\n" + " .insn e,0x0000\n" /* illegal opcode */ "0: nopr %%r7\n" EX_TABLE(0b, 0b) :); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 233cc9bcd652..9ff2bd83aad7 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1369,7 +1369,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, jit->prg); /* - * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) + * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) * goto out; */ @@ -1381,9 +1381,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7080000, REG_W0, 1); /* laal %w1,%w0,off(%r15) */ EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); - /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */ + /* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */ patch_2_clij = jit->prg; - EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT, + EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1, 2, jit->prg); /* diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 9a2f20cbd48b..b1e38784eb23 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -227,7 +227,7 @@ static const int bpf2sparc[] = { [BPF_REG_AX] = G7, - /* temporary register for internal BPF JIT */ + /* temporary register for BPF JIT */ [TMP_REG_1] = G1, [TMP_REG_2] = G2, [TMP_REG_3] = G3, @@ -867,7 +867,7 @@ static void emit_tail_call(struct jit_ctx *ctx) emit(LD32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx); emit_cmpi(tmp, MAX_TAIL_CALL_CNT, ctx); #define OFFSET2 13 - emit_branch(BGU, ctx->idx, ctx->idx + OFFSET2, ctx); + emit_branch(BGEU, ctx->idx, ctx->idx + OFFSET2, ctx); emit_nop(ctx); emit_alu_K(ADD, tmp, 1, ctx); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7399327d1eff..5c2ccb85f2ef 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1932,6 +1932,7 @@ config EFI depends on ACPI select UCS2_STRING select EFI_RUNTIME_WRAPPERS + select ARCH_USE_MEMREMAP_PROT help This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index e38a4cf795d9..97b1f84bb53f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) ud2 1: #endif +#ifdef CONFIG_XEN_PV + ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV +#endif + POP_REGS pop_rdi=0 /* @@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry) .Lparanoid_entry_checkgs: /* EBX = 1 -> kernel GSBASE active, no restore required */ movl $1, %ebx + /* * The kernel-enforced convention is a negative GSBASE indicates * a kernel value. No SWAPGS needed on entry and exit. @@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry) movl $MSR_GS_BASE, %ecx rdmsr testl %edx, %edx - jns .Lparanoid_entry_swapgs - ret + js .Lparanoid_kernel_gsbase -.Lparanoid_entry_swapgs: + /* EBX = 0 -> SWAPGS required on exit */ + xorl %ebx, %ebx swapgs +.Lparanoid_kernel_gsbase: - /* - * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an - * unconditional CR3 write, even in the PTI case. So do an lfence - * to prevent GS speculation, regardless of whether PTI is enabled. 
- */ FENCE_SWAPGS_KERNEL_ENTRY - - /* EBX = 0 -> SWAPGS required on exit */ - xorl %ebx, %ebx ret SYM_CODE_END(paranoid_entry) @@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry) pushq %r12 ret -.Lerror_entry_done_lfence: - FENCE_SWAPGS_KERNEL_ENTRY -.Lerror_entry_done: - ret - /* * There are two places in the kernel that can potentially fault with * usergs. Handle them here. B stepping K8s sometimes report a @@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry) * .Lgs_change's error handler with kernel gsbase. */ SWAPGS - FENCE_SWAPGS_USER_ENTRY - jmp .Lerror_entry_done + + /* + * Issue an LFENCE to prevent GS speculation, regardless of whether it is a + * kernel or user gsbase. + */ +.Lerror_entry_done_lfence: + FENCE_SWAPGS_KERNEL_ENTRY + ret .Lbstep_iret: /* Fix truncated RIP */ diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index 6053674f9132..c2767a6a387e 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -102,12 +102,6 @@ extern void switch_fpu_return(void); */ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name); -/* - * Tasks that are not using SVA have mm->pasid set to zero to note that they - * will not have the valid bit set in MSR_IA32_PASID while they are running. - */ -#define PASID_DISABLED 0 - /* Trap handling */ extern int fpu__exception_code(struct fpu *fpu, int trap_nr); extern void fpu_sync_fpstate(struct fpu *fpu); diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 5a0bcf8b78d7..048b6d5aff50 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -108,7 +108,7 @@ #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ -#define INTEL_FAM6_RAPTOR_LAKE 0xB7 +#define INTEL_FAM6_RAPTORLAKE 0xB7 /* "Small Core" Processors (Atom) */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6ac61f85e07b..860ed500580c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter { #define APICV_INHIBIT_REASON_PIT_REINJ 4 #define APICV_INHIBIT_REASON_X2APIC 5 #define APICV_INHIBIT_REASON_BLOCKIRQ 6 +#define APICV_INHIBIT_REASON_ABSENT 7 struct kvm_arch { unsigned long n_used_mmu_pages; diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h index 2cef6c5a52c2..6acaf5af0a3d 100644 --- a/arch/x86/include/asm/sev-common.h +++ b/arch/x86/include/asm/sev-common.h @@ -73,4 +73,15 @@ #define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK) +/* + * Error codes related to GHCB input that can be communicated back to the guest + * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2. 
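
The GHCB_ERR_* codes defined just below pair with that convention. A
hedged sketch of how a handler might report one of them back to the
guest (the helper itself is hypothetical, although the
ghcb_set_sw_exit_info_1/2() accessors do exist in the kernel's SEV
support code)::

  /* Signal a GHCB protocol error to the guest: the low 32 bits of
   * SW_EXITINFO1 are set to 2, and SW_EXITINFO2 carries the reason. */
  static void ghcb_report_error(struct ghcb *ghcb, u64 reason)
  {
          ghcb_set_sw_exit_info_1(ghcb, 2);
          ghcb_set_sw_exit_info_2(ghcb, reason);
  }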
+ */ +#define GHCB_ERR_NOT_REGISTERED 1 +#define GHCB_ERR_INVALID_USAGE 2 +#define GHCB_ERR_INVALID_SCRATCH_AREA 3 +#define GHCB_ERR_MISSING_INPUT 4 +#define GHCB_ERR_INVALID_INPUT 5 +#define GHCB_ERR_INVALID_EVENT 6 + #endif diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index d5958278eba6..91d4b6de58ab 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, struct fpstate *fpstate) { struct xregs_state __user *x = buf; - struct _fpx_sw_bytes sw_bytes; + struct _fpx_sw_bytes sw_bytes = {}; u32 xfeatures; int err; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c410be738ae7..6a190c7f4d71 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -742,7 +742,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) return 0; } -static char *prepare_command_line(void) +static char * __init prepare_command_line(void) { #ifdef CONFIG_CMDLINE_BOOL #ifdef CONFIG_CMDLINE_OVERRIDE diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 74f0ec955384..a9fc2ac7a8bd 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, char *dst, char *buf, size_t size) { unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; - char __user *target = (char __user *)dst; - u64 d8; - u32 d4; - u16 d2; - u8 d1; /* * This function uses __put_user() independent of whether kernel or user @@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, * instructions here would cause infinite nesting. */ switch (size) { - case 1: + case 1: { + u8 d1; + u8 __user *target = (u8 __user *)dst; + memcpy(&d1, buf, 1); if (__put_user(d1, target)) goto fault; break; - case 2: + } + case 2: { + u16 d2; + u16 __user *target = (u16 __user *)dst; + memcpy(&d2, buf, 2); if (__put_user(d2, target)) goto fault; break; - case 4: + } + case 4: { + u32 d4; + u32 __user *target = (u32 __user *)dst; + memcpy(&d4, buf, 4); if (__put_user(d4, target)) goto fault; break; - case 8: + } + case 8: { + u64 d8; + u64 __user *target = (u64 __user *)dst; + memcpy(&d8, buf, 8); if (__put_user(d8, target)) goto fault; break; + } default: WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); return ES_UNSUPPORTED; @@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, char *src, char *buf, size_t size) { unsigned long error_code = X86_PF_PROT; - char __user *s = (char __user *)src; - u64 d8; - u32 d4; - u16 d2; - u8 d1; /* * This function uses __get_user() independent of whether kernel or user @@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, * instructions here would cause infinite nesting. 
*/
switch (size) {
- case 1:
+ case 1: {
+ u8 d1;
+ u8 __user *s = (u8 __user *)src;
+
if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
- case 2:
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *s = (u16 __user *)src;
+
if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
- case 4:
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *s = (u32 __user *)src;
+
if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
- case 8:
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *s = (u64 __user *)src;
if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
+ }
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2e076a459a0c..a698196377be 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+static void __init tsc_disable_clocksource_watchdog(void)
+{
+ clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+ clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
+
+ /*
+ * Disable the clocksource watchdog when the system has:
+ * - TSC running at constant frequency
+ * - TSC which does not stop in C-States
+ * - the TSC_ADJUST register which allows detecting even minimal
+ * modifications
+ * - not more than two sockets. As the number of sockets cannot be
+ * evaluated at the early boot stage where this has to be
+ * invoked, check the number of online memory nodes as a
+ * fallback solution which is a reasonable estimate.
+ */
+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+ boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+ boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+ nr_online_nodes <= 2)
+ tsc_disable_clocksource_watchdog();
}
/*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
if (tsc_unstable)
goto unreg;
- if (tsc_clocksource_reliable || no_tsc_watchdog)
- clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
}
if (tsc_clocksource_reliable || no_tsc_watchdog)
- clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+ tsc_disable_clocksource_watchdog();
clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 50a4515fe0ad..9452dc9664b5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,6 +30,7 @@ struct tsc_adjust {
};
static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
/*
* TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
}
}
+/*
+ * Normally the tsc_sync will be checked every time the system enters an
+ * idle state, but there is still a caveat that a system won't enter
+ * idle, either because it's too busy or configured purposely to not
+ * enter idle.
+ *
+ * So set up a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL (HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+ int next_cpu;
+
+ tsc_verify_tsc_adjust(false);
+
+ /* Run the check for all onlined CPUs in turn */
+ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = cpumask_first(cpu_online_mask);
+
+ tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+ add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+ return 0;
+
+ timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+ tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+ add_timer(&tsc_sync_check_timer);
+
+ return 0;
+}
+late_initcall(start_sync_check_timer);
+
static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
unsigned int cpu, bool bootcpu)
{
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index e66e620c3bed..539333ac4b38 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -81,7 +81,6 @@ struct kvm_ioapic {
unsigned long irq_states[IOAPIC_NUM_PINS];
struct kvm_io_device dev;
struct kvm *kvm;
- void (*ack_notifier)(void *opaque, int irq);
spinlock_t lock;
struct rtc_status rtc_status;
struct delayed_work eoi_inject;
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 650642b18d15..c2d7cfe82d00 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -56,7 +56,6 @@ struct kvm_pic {
struct kvm_io_device dev_master;
struct kvm_io_device dev_slave;
struct kvm_io_device dev_elcr;
- void (*ack_notifier)(void *opaque, int irq);
unsigned long irq_states[PIC_NUM_PINS];
};
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 759952dd1222..f206fc35deff 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
- if (apic->vcpu->arch.apicv_active)
+ if (kvm_x86_ops.sync_pir_to_irr)
highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3be9beea838d..e2e1d012df22 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
if (is_tdp_mmu_enabled(kvm))
- flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+ flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
return flush;
}
@@ -1936,7 +1940,11 @@ static void mmu_audit_disable(void) { }
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- return sp->role.invalid ||
+ if (sp->role.invalid)
+ return true;
+
+ /* TDP MMU pages do not use the MMU generation.
*/
+ return !sp->tdp_mmu_page &&
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
iterator->shadow_addr = root;
iterator->level = vcpu->arch.mmu->shadow_root_level;
- if (iterator->level == PT64_ROOT_4LEVEL &&
+ if (iterator->level >= PT64_ROOT_4LEVEL &&
vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
!vcpu->arch.mmu->direct_map)
- --iterator->level;
+ iterator->level = PT32E_ROOT_LEVEL;
if (iterator->level == PT32E_ROOT_LEVEL) {
/*
@@ -3976,6 +3980,20 @@ out_retry:
return true;
}
+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault, int mmu_seq)
+{
+ if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+ return true;
+
+ return fault->slot &&
+ mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
else
write_lock(&vcpu->kvm->mmu_lock);
- if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+ if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
+
r = make_mmu_pages_available(vcpu);
if (r)
goto out_unlock;
@@ -4855,7 +4874,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
struct kvm_mmu_role_regs regs = {
.cr0 = cr0,
- .cr4 = cr4,
+ .cr4 = cr4 & ~X86_CR4_PKE,
.efer = efer,
};
union kvm_mmu_role new_role;
@@ -4919,7 +4938,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->direct_map = false;
update_permission_bitmask(context, true);
- update_pkru_bitmask(context);
+ context->pkru_mask = 0;
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
@@ -5025,6 +5044,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
/*
* Invalidate all MMU roles to force them to reinitialize as CPUID
* information is factored into reserved bit calculations.
+ *
+ * Correctly handling multiple vCPU models (with respect to paging and
+ * physical address properties) in a single VM would require tracking
+ * all relevant CPUID information in kvm_mmu_page_role. That is very
+ * undesirable as it would increase the memory requirements for
+ * gfn_track (see struct kvm_mmu_page_role comments). For now that
+ * problem is swept under the rug; KVM's CPUID API is horrific and
+ * it's all but impossible to solve without introducing a new API.
*/
vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5032,24 +5059,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_mmu_reset_context(vcpu);
/*
- * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
- * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
- * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
- * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
- * sweep the problem under the rug.
- * - * KVM's horrific CPUID ABI makes the problem all but impossible to - * solve, as correctly handling multiple vCPU models (with respect to - * paging and physical address properties) in a single VM would require - * tracking all relevant CPUID information in kvm_mmu_page_role. That - * is very undesirable as it would double the memory requirements for - * gfn_track (see struct kvm_mmu_page_role comments), and in practice - * no sane VMM mucks with the core vCPU model on the fly. + * Changing guest CPUID after KVM_RUN is forbidden, see the comment in + * kvm_arch_vcpu_ioctl(). */ - if (vcpu->arch.last_vmentry_cpu != -1) { - pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n"); - pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n"); - } + KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm); } void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) @@ -5369,7 +5382,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { - kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE); + kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE); ++vcpu->stat.invlpg; } EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); @@ -5854,8 +5867,6 @@ restart: void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot) { - bool flush = false; - if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); /* @@ -5863,17 +5874,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, * logging at a 4k granularity and never creates collapsible * 2m SPTEs during dirty logging. */ - flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true); - if (flush) + if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true)) kvm_arch_flush_remote_tlbs_memslot(kvm, slot); write_unlock(&kvm->mmu_lock); } if (is_tdp_mmu_enabled(kvm)) { read_lock(&kvm->mmu_lock); - flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush); - if (flush) - kvm_arch_flush_remote_tlbs_memslot(kvm, slot); + kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot); read_unlock(&kvm->mmu_lock); } } @@ -6182,23 +6190,46 @@ void kvm_mmu_module_exit(void) mmu_audit_disable(); } +/* + * Calculate the effective recovery period, accounting for '0' meaning "let KVM + * select a halving time of 1 hour". Returns true if recovery is enabled. + */ +static bool calc_nx_huge_pages_recovery_period(uint *period) +{ + /* + * Use READ_ONCE to get the params, this may be called outside of the + * param setters, e.g. by the kthread to compute its next timeout. + */ + bool enabled = READ_ONCE(nx_huge_pages); + uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio); + + if (!enabled || !ratio) + return false; + + *period = READ_ONCE(nx_huge_pages_recovery_period_ms); + if (!*period) { + /* Make sure the period is not less than one second. 
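
For the fallback computed in the lines that complete this helper, the
arithmetic works out as follows (a worked example, not text from the
patch): with nx_huge_pages_recovery_ratio = 10 and an unset period::

  *period = 60 * 60 * 1000 / min(10, 3600) = 360000 ms

that is, one recovery pass every six minutes, while the min(ratio, 3600)
clamp keeps the computed period from dropping below one second.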
*/ + ratio = min(ratio, 3600u); + *period = 60 * 60 * 1000 / ratio; + } + return true; +} + static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp) { bool was_recovery_enabled, is_recovery_enabled; uint old_period, new_period; int err; - was_recovery_enabled = nx_huge_pages_recovery_ratio; - old_period = nx_huge_pages_recovery_period_ms; + was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period); err = param_set_uint(val, kp); if (err) return err; - is_recovery_enabled = nx_huge_pages_recovery_ratio; - new_period = nx_huge_pages_recovery_period_ms; + is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period); - if (READ_ONCE(nx_huge_pages) && is_recovery_enabled && + if (is_recovery_enabled && (!was_recovery_enabled || old_period > new_period)) { struct kvm *kvm; @@ -6262,18 +6293,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) static long get_nx_lpage_recovery_timeout(u64 start_time) { - uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio); - uint period = READ_ONCE(nx_huge_pages_recovery_period_ms); + bool enabled; + uint period; - if (!period && ratio) { - /* Make sure the period is not less than one second. */ - ratio = min(ratio, 3600u); - period = 60 * 60 * 1000 / ratio; - } + enabled = calc_nx_huge_pages_recovery_period(&period); - return READ_ONCE(nx_huge_pages) && ratio - ? start_time + msecs_to_jiffies(period) - get_jiffies_64() - : MAX_SCHEDULE_TIMEOUT; + return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64() + : MAX_SCHEDULE_TIMEOUT; } static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index f87d36898c44..708a5d297fe1 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = RET_PF_RETRY; write_lock(&vcpu->kvm->mmu_lock); - if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) + + if (is_page_fault_stale(vcpu, fault, mmu_seq)) goto out_unlock; kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index a54c3491af42..1db8496259ad 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); int level = sp->role.level; gfn_t base_gfn = sp->gfn; - u64 old_child_spte; - u64 *sptep; - gfn_t gfn; int i; trace_kvm_mmu_prepare_zap_page(sp); @@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, tdp_mmu_unlink_page(kvm, sp, shared); for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - sptep = rcu_dereference(pt) + i; - gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); + u64 *sptep = rcu_dereference(pt) + i; + gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); + u64 old_child_spte; if (shared) { /* @@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, shared); } - kvm_flush_remote_tlbs_with_address(kvm, gfn, + kvm_flush_remote_tlbs_with_address(kvm, base_gfn, KVM_PAGES_PER_HPAGE(level + 1)); call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback); @@ -1033,9 +1031,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, { struct kvm_mmu_page *root; - for_each_tdp_mmu_root(kvm, root, range->slot->as_id) - flush |= zap_gfn_range(kvm, root, 
range->start, range->end, - range->may_block, flush, false); + for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false) + flush = zap_gfn_range(kvm, root, range->start, range->end, + range->may_block, flush, false); return flush; } @@ -1364,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, * Clear leaf entries which could be replaced by large mappings, for * GFNs within the slot. */ -static bool zap_collapsible_spte_range(struct kvm *kvm, +static void zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, - const struct kvm_memory_slot *slot, - bool flush) + const struct kvm_memory_slot *slot) { gfn_t start = slot->base_gfn; gfn_t end = start + slot->npages; @@ -1378,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm, tdp_root_for_each_pte(iter, root, start, end) { retry: - if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) { - flush = false; + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; - } if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) @@ -1393,6 +1388,7 @@ retry: pfn, PG_LEVEL_NUM)) continue; + /* Note, a successful atomic zap also does a remote TLB flush. */ if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { /* * The iter must explicitly re-read the SPTE because @@ -1401,30 +1397,24 @@ retry: iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); goto retry; } - flush = true; } rcu_read_unlock(); - - return flush; } /* * Clear non-leaf entries (and free associated page tables) which could * be replaced by large mappings, for GFNs within the slot. */ -bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *slot, - bool flush) +void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *slot) { struct kvm_mmu_page *root; lockdep_assert_held_read(&kvm->mmu_lock); for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) - flush = zap_collapsible_spte_range(kvm, root, slot, flush); - - return flush; + zap_collapsible_spte_range(kvm, root, slot); } /* diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 476b133544dd..3899004a5d91 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot); -bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *slot, - bool flush); +void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *slot); bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index affc0ea98d30..8f9af7b7dbbe 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -900,6 +900,7 @@ out: bool svm_check_apicv_inhibit_reasons(ulong bit) { ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | + BIT(APICV_INHIBIT_REASON_ABSENT) | BIT(APICV_INHIBIT_REASON_HYPERV) | BIT(APICV_INHIBIT_REASON_NESTED) | BIT(APICV_INHIBIT_REASON_IRQWIN) | @@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu) static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run) { struct vcpu_svm *svm = to_svm(vcpu); + int cpu = get_cpu(); + WARN_ON(cpu != vcpu->cpu); svm->avic_is_running = is_run; - if (!kvm_vcpu_apicv_active(vcpu)) - return; - - if (is_run) - avic_vcpu_load(vcpu, vcpu->cpu); - else - avic_vcpu_put(vcpu); + if (kvm_vcpu_apicv_active(vcpu)) { + if 
(is_run) + avic_vcpu_load(vcpu, cpu); + else + avic_vcpu_put(vcpu); + } + put_cpu(); } void svm_vcpu_blocking(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 871c426ec389..b4095dfeeee6 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu) pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS; pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1; - pmu->reserved_bits = 0xffffffff00200000ull; + pmu->reserved_bits = 0xfffffff000280000ull; pmu->version = 1; /* not applicable to AMD; but clean them to prevent any fall out */ pmu->counter_bitmask[KVM_PMC_FIXED] = 0; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 21ac0a5de4e0..7656a2c5662a 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1543,28 +1543,50 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id) return false; } -static int sev_lock_for_migration(struct kvm *kvm) +static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) { - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info; + struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info; + int r = -EBUSY; + + if (dst_kvm == src_kvm) + return -EINVAL; /* - * Bail if this VM is already involved in a migration to avoid deadlock - * between two VMs trying to migrate to/from each other. + * Bail if these VMs are already involved in a migration to avoid + * deadlock between two VMs trying to migrate to/from each other. */ - if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1)) + if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1)) return -EBUSY; - mutex_lock(&kvm->lock); + if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1)) + goto release_dst; + r = -EINTR; + if (mutex_lock_killable(&dst_kvm->lock)) + goto release_src; + if (mutex_lock_killable(&src_kvm->lock)) + goto unlock_dst; return 0; + +unlock_dst: + mutex_unlock(&dst_kvm->lock); +release_src: + atomic_set_release(&src_sev->migration_in_progress, 0); +release_dst: + atomic_set_release(&dst_sev->migration_in_progress, 0); + return r; } -static void sev_unlock_after_migration(struct kvm *kvm) +static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) { - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info; + struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info; - mutex_unlock(&kvm->lock); - atomic_set_release(&sev->migration_in_progress, 0); + mutex_unlock(&dst_kvm->lock); + mutex_unlock(&src_kvm->lock); + atomic_set_release(&dst_sev->migration_in_progress, 0); + atomic_set_release(&src_sev->migration_in_progress, 0); } @@ -1607,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst, dst->asid = src->asid; dst->handle = src->handle; dst->pages_locked = src->pages_locked; + dst->enc_context_owner = src->enc_context_owner; src->asid = 0; src->active = false; src->handle = 0; src->pages_locked = 0; + src->enc_context_owner = NULL; - INIT_LIST_HEAD(&dst->regions_list); - list_replace_init(&src->regions_list, &dst->regions_list); + list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list); } static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) @@ -1666,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) bool charged = false; int ret; - ret = sev_lock_for_migration(kvm); - if (ret) - return ret; - - if 
(sev_guest(kvm)) { - ret = -EINVAL; - goto out_unlock; - } - source_kvm_file = fget(source_fd); if (!file_is_kvm(source_kvm_file)) { ret = -EBADF; @@ -1682,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) } source_kvm = source_kvm_file->private_data; - ret = sev_lock_for_migration(source_kvm); + ret = sev_lock_two_vms(kvm, source_kvm); if (ret) goto out_fput; - if (!sev_guest(source_kvm)) { + if (sev_guest(kvm) || !sev_guest(source_kvm)) { ret = -EINVAL; - goto out_source; + goto out_unlock; } src_sev = &to_kvm_svm(source_kvm)->sev_info; + + /* + * VMs mirroring src's encryption context rely on it to keep the + * ASID allocated, but below we are clearing src_sev->asid. + */ + if (src_sev->num_mirrored_vms) { + ret = -EBUSY; + goto out_unlock; + } + dst_sev->misc_cg = get_current_misc_cg(); cg_cleanup_sev = dst_sev; if (dst_sev->misc_cg != src_sev->misc_cg) { @@ -1728,13 +1752,11 @@ out_dst_cgroup: sev_misc_cg_uncharge(cg_cleanup_sev); put_misc_cg(cg_cleanup_sev->misc_cg); cg_cleanup_sev->misc_cg = NULL; -out_source: - sev_unlock_after_migration(source_kvm); +out_unlock: + sev_unlock_two_vms(kvm, source_kvm); out_fput: if (source_kvm_file) fput(source_kvm_file); -out_unlock: - sev_unlock_after_migration(kvm); return ret; } @@ -1953,76 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd) { struct file *source_kvm_file; struct kvm *source_kvm; - struct kvm_sev_info source_sev, *mirror_sev; + struct kvm_sev_info *source_sev, *mirror_sev; int ret; source_kvm_file = fget(source_fd); if (!file_is_kvm(source_kvm_file)) { ret = -EBADF; - goto e_source_put; + goto e_source_fput; } source_kvm = source_kvm_file->private_data; - mutex_lock(&source_kvm->lock); - - if (!sev_guest(source_kvm)) { - ret = -EINVAL; - goto e_source_unlock; - } + ret = sev_lock_two_vms(kvm, source_kvm); + if (ret) + goto e_source_fput; - /* Mirrors of mirrors should work, but let's not get silly */ - if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) { + /* + * Mirrors of mirrors should work, but let's not get silly. Also + * disallow out-of-band SEV/SEV-ES init if the target is already an + * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being + * created after SEV/SEV-ES initialization, e.g. to init intercepts. + */ + if (sev_guest(kvm) || !sev_guest(source_kvm) || + is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) { ret = -EINVAL; - goto e_source_unlock; + goto e_unlock; } - memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info, - sizeof(source_sev)); - /* * The mirror kvm holds an enc_context_owner ref so its asid can't * disappear until we're done with it */ + source_sev = &to_kvm_svm(source_kvm)->sev_info; kvm_get_kvm(source_kvm); - - fput(source_kvm_file); - mutex_unlock(&source_kvm->lock); - mutex_lock(&kvm->lock); - - /* - * Disallow out-of-band SEV/SEV-ES init if the target is already an - * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being - * created after SEV/SEV-ES initialization, e.g. to init intercepts. 
- */ - if (sev_guest(kvm) || kvm->created_vcpus) { - ret = -EINVAL; - goto e_mirror_unlock; - } + source_sev->num_mirrored_vms++; /* Set enc_context_owner and copy its encryption context over */ mirror_sev = &to_kvm_svm(kvm)->sev_info; mirror_sev->enc_context_owner = source_kvm; mirror_sev->active = true; - mirror_sev->asid = source_sev.asid; - mirror_sev->fd = source_sev.fd; - mirror_sev->es_active = source_sev.es_active; - mirror_sev->handle = source_sev.handle; + mirror_sev->asid = source_sev->asid; + mirror_sev->fd = source_sev->fd; + mirror_sev->es_active = source_sev->es_active; + mirror_sev->handle = source_sev->handle; + INIT_LIST_HEAD(&mirror_sev->regions_list); + ret = 0; + /* * Do not copy ap_jump_table. Since the mirror does not share the same * KVM contexts as the original, and they may have different * memory-views. */ - mutex_unlock(&kvm->lock); - return 0; - -e_mirror_unlock: - mutex_unlock(&kvm->lock); - kvm_put_kvm(source_kvm); - return ret; -e_source_unlock: - mutex_unlock(&source_kvm->lock); -e_source_put: +e_unlock: + sev_unlock_two_vms(kvm, source_kvm); +e_source_fput: if (source_kvm_file) fput(source_kvm_file); return ret; @@ -2034,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm) struct list_head *head = &sev->regions_list; struct list_head *pos, *q; + WARN_ON(sev->num_mirrored_vms); + if (!sev_guest(kvm)) return; /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */ if (is_mirroring_enc_context(kvm)) { - kvm_put_kvm(sev->enc_context_owner); + struct kvm *owner_kvm = sev->enc_context_owner; + struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info; + + mutex_lock(&owner_kvm->lock); + if (!WARN_ON(!owner_sev->num_mirrored_vms)) + owner_sev->num_mirrored_vms--; + mutex_unlock(&owner_kvm->lock); + kvm_put_kvm(owner_kvm); return; } - mutex_lock(&kvm->lock); - /* * Ensure that all guest tagged cache entries are flushed before * releasing the pages back to the system for use. CLFLUSH will @@ -2064,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm) } } - mutex_unlock(&kvm->lock); - sev_unbind_asid(kvm, sev->handle); sev_asid_free(sev); } @@ -2249,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->sev_es.vmsa)); if (svm->sev_es.ghcb_sa_free) - kfree(svm->sev_es.ghcb_sa); + kvfree(svm->sev_es.ghcb_sa); } static void dump_ghcb(struct vcpu_svm *svm) @@ -2341,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); } -static int sev_es_validate_vmgexit(struct vcpu_svm *svm) +static bool sev_es_validate_vmgexit(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu; struct ghcb *ghcb; - u64 exit_code = 0; + u64 exit_code; + u64 reason; ghcb = svm->sev_es.ghcb; - /* Only GHCB Usage code 0 is supported */ - if (ghcb->ghcb_usage) - goto vmgexit_err; - /* - * Retrieve the exit code now even though is may not be marked valid + * Retrieve the exit code now even though it may not be marked valid * as it could help with debugging. 
*/ exit_code = ghcb_get_sw_exit_code(ghcb); + /* Only GHCB Usage code 0 is supported */ + if (ghcb->ghcb_usage) { + reason = GHCB_ERR_INVALID_USAGE; + goto vmgexit_err; + } + + reason = GHCB_ERR_MISSING_INPUT; + if (!ghcb_sw_exit_code_is_valid(ghcb) || !ghcb_sw_exit_info_1_is_valid(ghcb) || !ghcb_sw_exit_info_2_is_valid(ghcb)) @@ -2437,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) case SVM_VMGEXIT_UNSUPPORTED_EVENT: break; default: + reason = GHCB_ERR_INVALID_EVENT; goto vmgexit_err; } - return 0; + return true; vmgexit_err: vcpu = &svm->vcpu; - if (ghcb->ghcb_usage) { + if (reason == GHCB_ERR_INVALID_USAGE) { vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", ghcb->ghcb_usage); + } else if (reason == GHCB_ERR_INVALID_EVENT) { + vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n", + exit_code); } else { - vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n", + vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n", exit_code); dump_ghcb(svm); } - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; - vcpu->run->internal.ndata = 2; - vcpu->run->internal.data[0] = exit_code; - vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; + /* Clear the valid entries fields */ + memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); + + ghcb_set_sw_exit_info_1(ghcb, 2); + ghcb_set_sw_exit_info_2(ghcb, reason); - return -EINVAL; + return false; } void sev_es_unmap_ghcb(struct vcpu_svm *svm) @@ -2482,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm) svm->sev_es.ghcb_sa_sync = false; } - kfree(svm->sev_es.ghcb_sa); + kvfree(svm->sev_es.ghcb_sa); svm->sev_es.ghcb_sa = NULL; svm->sev_es.ghcb_sa_free = false; } @@ -2530,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); if (!scratch_gpa_beg) { pr_err("vmgexit: scratch gpa not provided\n"); - return false; + goto e_scratch; } scratch_gpa_end = scratch_gpa_beg + len; if (scratch_gpa_end < scratch_gpa_beg) { pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n", len, scratch_gpa_beg); - return false; + goto e_scratch; } if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) { @@ -2555,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) scratch_gpa_end > ghcb_scratch_end) { pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n", scratch_gpa_beg, scratch_gpa_end); - return false; + goto e_scratch; } scratch_va = (void *)svm->sev_es.ghcb; @@ -2568,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) if (len > GHCB_SCRATCH_AREA_LIMIT) { pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n", len, GHCB_SCRATCH_AREA_LIMIT); - return false; + goto e_scratch; } - scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT); + scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT); if (!scratch_va) - return false; + goto e_scratch; if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { /* Unable to copy scratch area from guest */ pr_err("vmgexit: kvm_read_guest for scratch area failed\n"); - kfree(scratch_va); - return false; + kvfree(scratch_va); + goto e_scratch; } /* @@ -2596,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) svm->sev_es.ghcb_sa_len = len; return true; + +e_scratch: + ghcb_set_sw_exit_info_1(ghcb, 2); + 
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); + + return false; } static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, @@ -2646,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID); if (!ret) { - ret = -EINVAL; + /* Error, keep GHCB MSR value as-is */ break; } @@ -2682,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) GHCB_MSR_TERM_REASON_POS); pr_info("SEV-ES guest requested termination: %#llx:%#llx\n", reason_set, reason_code); - fallthrough; + + ret = -EINVAL; + break; } default: - ret = -EINVAL; + /* Error, keep GHCB MSR value as-is */ + break; } trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, @@ -2709,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) if (!ghcb_gpa) { vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n"); - return -EINVAL; + + /* Without a GHCB, just return right back to the guest */ + return 1; } if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { /* Unable to map GHCB from guest */ vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n", ghcb_gpa); - return -EINVAL; + + /* Without a GHCB, just return right back to the guest */ + return 1; } svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; @@ -2726,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) exit_code = ghcb_get_sw_exit_code(ghcb); - ret = sev_es_validate_vmgexit(svm); - if (ret) - return ret; + if (!sev_es_validate_vmgexit(svm)) + return 1; sev_es_sync_from_ghcb(svm); ghcb_set_sw_exit_info_1(ghcb, 0); ghcb_set_sw_exit_info_2(ghcb, 0); - ret = -EINVAL; + ret = 1; switch (exit_code) { case SVM_VMGEXIT_MMIO_READ: if (!setup_vmgexit_scratch(svm, true, control->exit_info_2)) @@ -2775,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) default: pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", control->exit_info_1); - ghcb_set_sw_exit_info_1(ghcb, 1); - ghcb_set_sw_exit_info_2(ghcb, - X86_TRAP_UD | - SVM_EVTINJ_TYPE_EXEPT | - SVM_EVTINJ_VALID); + ghcb_set_sw_exit_info_1(ghcb, 2); + ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT); } - ret = 1; break; } case SVM_VMGEXIT_UNSUPPORTED_EVENT: vcpu_unimpl(vcpu, "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", control->exit_info_1, control->exit_info_2); + ret = -EINVAL; break; default: ret = svm_invoke_exit_handler(vcpu, exit_code); @@ -2810,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) return -EINVAL; if (!setup_vmgexit_scratch(svm, in, bytes)) - return -EINVAL; + return 1; return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, count, in); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5630c241d5f6..d0f68d11ec70 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .load_eoi_exitmap = svm_load_eoi_exitmap, .hwapic_irr_update = svm_hwapic_irr_update, .hwapic_isr_update = svm_hwapic_isr_update, - .sync_pir_to_irr = kvm_lapic_find_highest_irr, .apicv_post_state_restore = avic_post_state_restore, .set_tss_addr = svm_set_tss_addr, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 5faad3dc10e2..1c7306c370fa 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -79,6 +79,7 @@ struct kvm_sev_info { struct list_head regions_list; /* List of registered regions */ u64 ap_jump_table; /* SEV-ES AP Jump Table address */ struct 
kvm *enc_context_owner; /* Owner of copied encryption context */
+	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
 };
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 1e2f66951566..9c941535f78c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1162,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
	WARN_ON(!enable_vpid);
 
	/*
-	 * If VPID is enabled and used by vmc12, but L2 does not have a unique
-	 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
-	 * a VPID for L2, flush the current context as the effective ASID is
-	 * common to both L1 and L2.
-	 *
-	 * Defer the flush so that it runs after vmcs02.EPTP has been set by
-	 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
-	 * redundant flushes further down the nested pipeline.
-	 *
-	 * If a TLB flush isn't required due to any of the above, and vpid12 is
-	 * changing then the new "virtual" VPID (vpid12) will reuse the same
-	 * "real" VPID (vpid02), and so needs to be flushed. There's no direct
-	 * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
-	 * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
-	 * guest-physical mappings, so there is no need to sync the nEPT MMU.
+	 * VPID is enabled and in use by vmcs12.  If vpid12 is changing, then
+	 * emulate a guest TLB flush as KVM does not track vpid12 history nor
+	 * is the VPID incorporated into the MMU context.  I.e. KVM must assume
+	 * that the new vpid12 has never been used and thus represents a new
+	 * guest ASID that cannot have entries in the TLB.
	 */
-	if (!nested_has_guest_tlb_tag(vcpu)) {
-		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-	} else if (is_vmenter &&
-		   vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+	if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-		vpid_sync_context(nested_get_vpid02(vcpu));
+		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+		return;
	}
+
+	/*
+	 * If VPID is enabled, used by vmcs12, and vpid12 is not changing, but
+	 * L2 does not have a unique TLB tag (ASID), i.e. EPT is disabled and
+	 * KVM was unable to allocate a VPID for L2, flush the current context
+	 * as the effective ASID is common to both L1 and L2.
+ */ + if (!nested_has_guest_tlb_tag(vcpu)) + kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) @@ -2594,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, - vmcs12->guest_ia32_perf_global_ctrl))) + vmcs12->guest_ia32_perf_global_ctrl))) { + *entry_failure_code = ENTRY_FAIL_DEFAULT; return -EINVAL; + } kvm_rsp_write(vcpu, vmcs12->guest_rsp); kvm_rip_write(vcpu, vmcs12->guest_rip); @@ -3344,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, }; u32 failed_index; - if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) - kvm_vcpu_flush_tlb_current(vcpu); + kvm_service_local_tlb_flush_requests(vcpu); evaluate_pending_interrupts = exec_controls_get(vmx) & (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); @@ -4502,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, (void)nested_get_evmcs_page(vcpu); } - /* Service the TLB flush request for L2 before switching to L1. */ - if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) - kvm_vcpu_flush_tlb_current(vcpu); + /* Service pending TLB flush requests for L2 before switching to L1. */ + kvm_service_local_tlb_flush_requests(vcpu); /* * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between @@ -4857,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) if (!vmx->nested.cached_vmcs12) goto out_cached_vmcs12; + vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); if (!vmx->nested.cached_shadow_vmcs12) goto out_cached_shadow_vmcs12; @@ -5289,8 +5287,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; struct vmcs_hdr hdr; - if (ghc->gpa != vmptr && - kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { /* * Reads from an unbacked page return all 1s, * which means that the 32 bits located at the diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 5f81ef092bd4..1c94783b5a54 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -5,6 +5,7 @@ #include <asm/cpu.h> #include "lapic.h" +#include "irq.h" #include "posted_intr.h" #include "trace.h" #include "vmx.h" @@ -77,13 +78,18 @@ after_clear_sn: pi_set_on(pi_desc); } +static bool vmx_can_use_vtd_pi(struct kvm *kvm) +{ + return irqchip_in_kernel(kvm) && enable_apicv && + kvm_arch_has_assigned_device(kvm) && + irq_remapping_cap(IRQ_POSTING_CAP); +} + void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + if (!vmx_can_use_vtd_pi(vcpu->kvm)) return; /* Set SN when the vCPU is preempted */ @@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu) struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + if (!vmx_can_use_vtd_pi(vcpu->kvm)) return 0; WARN_ON(irqs_disabled()); @@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, struct vcpu_data vcpu_info; 
int idx, ret = 0; - if (!kvm_arch_has_assigned_device(kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(kvm->vcpus[0])) + if (!vmx_can_use_vtd_pi(kvm)) return 0; idx = srcu_read_lock(&kvm->irq_srcu); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ba66c171d951..9453743ce0c4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2918,6 +2918,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) } } +static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) +{ + if (is_guest_mode(vcpu)) + return nested_get_vpid02(vcpu); + return to_vmx(vcpu)->vpid; +} + static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.mmu; @@ -2930,31 +2937,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) if (enable_ept) ept_sync_context(construct_eptp(vcpu, root_hpa, mmu->shadow_root_level)); - else if (!is_guest_mode(vcpu)) - vpid_sync_context(to_vmx(vcpu)->vpid); else - vpid_sync_context(nested_get_vpid02(vcpu)); + vpid_sync_context(vmx_get_current_vpid(vcpu)); } static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) { /* - * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in + * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in * vmx_flush_tlb_guest() for an explanation of why this is ok. */ - vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr); + vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr); } static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) { /* - * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0 - * or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit - * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is + * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a + * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are + * required to flush GVA->{G,H}PA mappings from the TLB if vpid is * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), * i.e. no explicit INVVPID is necessary. */ - vpid_sync_context(to_vmx(vcpu)->vpid); + vpid_sync_context(vmx_get_current_vpid(vcpu)); } void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) @@ -6262,9 +6267,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; - bool max_irr_updated; + bool got_posted_interrupt; - if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm)) + if (KVM_BUG_ON(!enable_apicv, vcpu->kvm)) return -EIO; if (pi_test_on(&vmx->pi_desc)) { @@ -6274,22 +6279,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) * But on x86 this is just a compiler barrier anyway. */ smp_mb__after_atomic(); - max_irr_updated = + got_posted_interrupt = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); - - /* - * If we are running L2 and L1 has a new pending interrupt - * which can be injected, this may cause a vmexit or it may - * be injected into L2. Either way, this interrupt will be - * processed via KVM_REQ_EVENT, not RVI, because we do not use - * virtual interrupt delivery to inject L1 interrupts into L2. - */ - if (is_guest_mode(vcpu) && max_irr_updated) - kvm_make_request(KVM_REQ_EVENT, vcpu); } else { max_irr = kvm_lapic_find_highest_irr(vcpu); + got_posted_interrupt = false; } - vmx_hwapic_irr_update(vcpu, max_irr); + + /* + * Newly recognized interrupts are injected via either virtual interrupt + * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is + * disabled in two cases: + * + * 1) If L2 is running and the vCPU has a new pending interrupt. 
If L1
+	 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
+	 * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
+	 * into L2, but KVM doesn't use virtual interrupt delivery to inject
+	 * interrupts into L2, and so KVM_REQ_EVENT is again needed.
+	 *
+	 * 2) If APICv is disabled for this vCPU, assigned devices may still
+	 * attempt to post interrupts.  The posted interrupt vector will cause
+	 * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
+	 */
+	if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
+		vmx_set_rvi(max_irr);
+	else if (got_posted_interrupt)
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
+
	return max_irr;
 }
 
@@ -7509,6 +7525,7 @@ static void hardware_unsetup(void)
 static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 {
	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+			  BIT(APICV_INHIBIT_REASON_ABSENT) |
			  BIT(APICV_INHIBIT_REASON_HYPERV) |
			  BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
@@ -7761,10 +7778,10 @@ static __init int hardware_setup(void)
		ple_window_shrink = 0;
	}
 
-	if (!cpu_has_vmx_apicv()) {
+	if (!cpu_has_vmx_apicv())
		enable_apicv = 0;
+	if (!enable_apicv)
		vmx_x86_ops.sync_pir_to_irr = NULL;
-	}
 
	if (cpu_has_vmx_tsc_scaling()) {
		kvm_has_tsc_control = true;
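The hardware_setup() hunk above is what makes the static_call_cond() conversions in x86.c below safe: when APICv is unsupported or disabled, vmx_x86_ops.sync_pir_to_irr is left NULL, and a conditional static call to a NULL target degrades to a no-op instead of a wild branch. A minimal sketch of the pattern, using hypothetical names that are not part of this series:

	#include <linux/kvm_host.h>
	#include <linux/static_call.h>

	void demo_sync_pir(struct kvm_vcpu *vcpu);	/* hypothetical op */
	DEFINE_STATIC_CALL_NULL(kvm_demo_sync_pir, demo_sync_pir);

	static void demo_caller(struct kvm_vcpu *vcpu)
	{
		/* Patched to a NOP while the target is NULL, so the call
		 * site needs no "is this op implemented?" check. */
		static_call_cond(kvm_demo_sync_pir)(vcpu);
	}

A backend that does implement the op would install it once at setup time with static_call_update(kvm_demo_sync_pir, demo_sync_pir).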
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5a403d92833f..e0aa4dd53c7f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3258,6 +3258,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
	static_call(kvm_x86_tlb_flush_guest)(vcpu);
 }
 
+
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+	++vcpu->stat.tlb_flush;
+	static_call(kvm_x86_tlb_flush_current)(vcpu);
+}
+
+/*
+ * Service "local" TLB flush requests, which are specific to the current MMU
+ * context.  In addition to the generic event handling in vcpu_enter_guest(),
+ * TLB flushes that are targeted at an MMU context also need to be serviced
+ * prior to nested VM-Enter/VM-Exit.
+ */
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
+{
+	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+		kvm_vcpu_flush_tlb_current(vcpu);
+
+	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
+		kvm_vcpu_flush_tlb_guest(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
@@ -4133,6 +4156,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_SGX_ATTRIBUTE:
 #endif
	case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+	case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
	case KVM_CAP_SREGS2:
	case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
	case KVM_CAP_VCPU_ATTRIBUTES:
@@ -4448,8 +4472,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
 {
-	if (vcpu->arch.apicv_active)
-		static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+	static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
	return kvm_apic_get_state(vcpu, s);
 }
@@ -5124,6 +5147,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;
 
+		/*
+		 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+		 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
+		 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
+		 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
+		 * the core vCPU model on the fly, so fail.
+		 */
+		r = -EINVAL;
+		if (vcpu->arch.last_vmentry_cpu != -1)
+			goto out;
+
		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
			goto out;
@@ -5134,6 +5168,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;
 
+		/*
+		 * KVM_SET_CPUID{,2} after KVM_RUN is forbidden; see the comment in
+		 * KVM_SET_CPUID case above.
+		 */
+		r = -EINVAL;
+		if (vcpu->arch.last_vmentry_cpu != -1)
+			goto out;
+
		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
			goto out;
@@ -5698,6 +5740,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
		smp_wmb();
		kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+		kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
		r = 0;
 split_irqchip_unlock:
		mutex_unlock(&kvm->lock);
@@ -6078,6 +6121,7 @@ set_identity_unlock:
		/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
		smp_wmb();
		kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+		kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
	create_irqchip_unlock:
		mutex_unlock(&kvm->lock);
		break;
@@ -8776,10 +8820,9 @@ static void kvm_apicv_init(struct kvm *kvm)
 {
	init_rwsem(&kvm->arch.apicv_update_lock);
 
-	if (enable_apicv)
-		clear_bit(APICV_INHIBIT_REASON_DISABLE,
-			  &kvm->arch.apicv_inhibit_reasons);
-	else
+	set_bit(APICV_INHIBIT_REASON_ABSENT,
+		&kvm->arch.apicv_inhibit_reasons);
+	if (!enable_apicv)
		set_bit(APICV_INHIBIT_REASON_DISABLE,
			&kvm->arch.apicv_inhibit_reasons);
 }
@@ -9528,8 +9571,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
	if (irqchip_split(vcpu->kvm))
		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
	else {
-		if (vcpu->arch.apicv_active)
-			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
		if (ioapic_in_kernel(vcpu->kvm))
			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
	}
@@ -9648,10 +9690,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
			/* Flushing all ASIDs flushes the current ASID... */
			kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
		}
-		if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-			kvm_vcpu_flush_tlb_current(vcpu);
-		if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
-			kvm_vcpu_flush_tlb_guest(vcpu);
+		kvm_service_local_tlb_flush_requests(vcpu);
 
		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -9802,10 +9841,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
		/*
		 * This handles the case where a posted interrupt was
-		 * notified with kvm_vcpu_kick.
+		 * notified with kvm_vcpu_kick. Assigned devices can
+		 * use the POSTED_INTR_VECTOR even if APICv is disabled,
+		 * so do it even if APICv is disabled on this vCPU.
		 */
-		if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
-			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+		if (kvm_lapic_enabled(vcpu))
+			static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
		if (kvm_vcpu_exit_request(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -9849,8 +9890,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
			break;
 
-		if (vcpu->arch.apicv_active)
-			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+		if (kvm_lapic_enabled(vcpu))
+			static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
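From userspace, the visible effect of the two last_vmentry_cpu checks above is that KVM_SET_CPUID and KVM_SET_CPUID2 now fail with -EINVAL once the vCPU has entered the guest. A hedged sketch of what a VMM would observe (fd setup and the cpuid buffer are assumed to exist already):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* 'vcpu_fd' is assumed to have executed KVM_RUN at least once. */
	static void try_set_cpuid(int vcpu_fd, struct kvm_cpuid2 *cpuid)
	{
		if (ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid) < 0 && errno == EINVAL)
			fprintf(stderr, "KVM_SET_CPUID2 rejected after KVM_RUN\n");
	}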
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 997669ae9caa..4abcd8d9836d 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
 
 #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
 
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
 int kvm_check_nested_events(struct kvm_vcpu *vcpu);
 
 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
@@ -185,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
-static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
-{
-	++vcpu->stat.tlb_flush;
-	static_call(kvm_x86_tlb_flush_current)(vcpu);
-}
-
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 726700fabca6..1d7b0c69b644 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1,9 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * bpf_jit_comp.c: BPF JIT compiler
+ * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
- * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
 #include <linux/netdevice.h>
 #include <linux/filter.h>
@@ -412,7 +412,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
- *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
+ *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
@@ -446,14 +446,14 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
	EMIT2(X86_JBE, offset);                   /* jbe out */
 
	/*
-	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 * goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
 
	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
-	EMIT2(X86_JA, offset);                    /* ja out */
+	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
 
@@ -504,14 +504,14 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
	int offset;
 
	/*
-	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 * goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
 
	offset = ctx->tail_call_direct_label - (prog + 2 - start);
-	EMIT2(X86_JA, offset);                    /* ja out */
+	EMIT2(X86_JAE, offset);                   /* jae out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
 
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index da9b7cfa4632..429a89c5468b 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1323,7 +1323,7 @@ static void emit_bpf_tail_call(u8 **pprog, u8 *ip)
	EMIT2(IA32_JBE, jmp_label(jmp_label1, 2));
 
	/*
-	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+	 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
	 * goto out;
	 */
	lo = (u32)MAX_TAIL_CALL_CNT;
@@ -1337,7 +1337,7 @@ static void emit_bpf_tail_call(u8 **pprog, u8 *ip)
	/* cmp ecx,lo */
	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), lo);
 
-	/* ja out */
+	/* jae out */
	EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
 
	/* add eax,0x1 */
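Both JITs previously emitted "cmp eax, MAX_TAIL_CALL_CNT; ja out" and only incremented the counter afterwards, which permits MAX_TAIL_CALL_CNT + 1 tail calls; switching ja to jae caps the chain at exactly MAX_TAIL_CALL_CNT, matching the limit the updated comments describe. A userspace back-of-envelope model of the two emitted checks (the constant here is illustrative; the real value is defined in include/linux/bpf.h):

	#include <stdio.h>

	#define MAX_TAIL_CALL_CNT 33	/* illustrative value only */

	int main(void)
	{
		int cnt, old_allowed = 0, new_allowed = 0;

		/* Old sequence: cmp eax, MAX; ja out; add eax, 1 */
		for (cnt = 0; !(cnt > MAX_TAIL_CALL_CNT); cnt++)
			old_allowed++;

		/* New sequence: cmp eax, MAX; jae out; add eax, 1 */
		for (cnt = 0; !(cnt >= MAX_TAIL_CALL_CNT); cnt++)
			new_allowed++;

		/* Prints "old=34 new=33": the old check was off by one. */
		printf("old=%d new=%d\n", old_allowed, new_allowed);
		return 0;
	}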
 *   if (index >= array->map.max_entries)
 *     goto out;
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index b15ebfe40a73..b0b848d6933a 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
		return;
	}
 
-	new = early_memremap(data.phys_map, data.size);
+	new = early_memremap_prot(data.phys_map, data.size,
+				  pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
	if (!new) {
		pr_err("Failed to map new boot services memmap\n");
		return;
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 4a3da7592b99..38d24d2ab38b 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
 #ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
+	int i;
 #endif
 
	base = (unsigned char *)real_mode_header;
@@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
	trampoline_header->flags = 0;
 
	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+	/* Map the real mode stub as virtual == physical */
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-	trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+	/*
+	 * Include the entirety of the kernel mapping into the trampoline
+	 * PGD.  This way, all mappings present in the normal kernel page
+	 * tables are usable while running on trampoline_pgd.
+	 */
+	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+		trampoline_pgd[i] = init_top_pgt[i].pgd;
 #endif
 
	sme_sev_setup_real_mode(trampoline_header);
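For a rough sense of what the new realmode loop covers: with 4-level paging each PGD slot maps 512 GiB, and assuming the default, non-randomized __PAGE_OFFSET of 0xffff888000000000, pgd_index(__PAGE_OFFSET) works out to 273, so the loop copies slots 273 through 511, i.e. the entire kernel half of the address space rather than only the top slot. A small userspace check of that arithmetic (both constants are assumptions, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_offset = 0xffff888000000000UL; /* assumed default */
		unsigned int pgdir_shift = 39;                    /* 4-level paging */
		unsigned int idx = (page_offset >> pgdir_shift) & 511;

		printf("pgd_index(__PAGE_OFFSET) = %u\n", idx);   /* prints 273 */
		return 0;
	}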
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 220dd9678494..444d824775f6 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -20,6 +20,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <../entry/calling.h>
 
 .pushsection .noinstr.text, "ax"
 /*
@@ -193,6 +194,25 @@ SYM_CODE_START(xen_iret)
 SYM_CODE_END(xen_iret)
 
 /*
+ * XEN pv doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and
+ * leave the IRET frame below %rsp, where it could be corrupted if an #NMI
+ * interrupts.  And having swapgs_restore_regs_and_return_to_usermode() push
+ * the IRET frame at the same address would be pointless anyway.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+	UNWIND_HINT_REGS
+	POP_REGS
+
+	/* stackleak_erase() can work safely on the kernel stack. */
+	STACKLEAK_ERASE_NOCLOBBER
+
+	addq $8, %rsp	/* skip regs->orig_ax */
+	jmp xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
+
+/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8799fa73ef34..8874a63ae952 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -860,13 +860,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);
 
+		rq_qos_done(rq->q, rq);
+
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!refcount_dec_and_test(&rq->ref))
			continue;
 
		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);
-		rq_qos_done(rq->q, rq);
 
		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
diff --git a/block/fops.c b/block/fops.c
index ad732a36f9b3..3cb1e81929bc 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -15,6 +15,7 @@
 #include <linux/falloc.h>
 #include <linux/suspend.h>
 #include <linux/fs.h>
+#include <linux/module.h>
 #include "blk.h"
 
 static inline struct inode *bdev_file_inode(struct file *file)
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 5b78e86e3459..b9c77885b872 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
	if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
		return -EINVAL;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",
+	return sysfs_emit(buf, "%s\n",
			ata_lpm_policy_names[ap->target_lpm_policy]);
 }
 DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 121635aa8c00..823c88622e34 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
	/* Transfer multiple of 2 bytes */
	if (rw == READ) {
		if (swap)
-			raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+			raw_insw_swapw(data_addr, (u16 *)buf, words);
		else
-			raw_insw((u16 *)data_addr, (u16 *)buf, words);
+			raw_insw(data_addr, (u16 *)buf, words);
	} else {
		if (swap)
-			raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+			raw_outsw_swapw(data_addr, (u16 *)buf, words);
		else
-			raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+			raw_outsw(data_addr, (u16 *)buf, words);
	}
 
	/* Transfer trailing byte, if any.
*/ @@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc, if (rw == READ) { if (swap) - raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1); + raw_insw_swapw(data_addr, (u16 *)pad, 1); else - raw_insw((u16 *)data_addr, (u16 *)pad, 1); + raw_insw(data_addr, (u16 *)pad, 1); *buf = pad[0]; } else { pad[0] = *buf; if (swap) - raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1); + raw_outsw_swapw(data_addr, (u16 *)pad, 1); else - raw_outsw((u16 *)data_addr, (u16 *)pad, 1); + raw_outsw(data_addr, (u16 *)pad, 1); } words++; } diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index e5838b23c9e0..3b31a4f596d8 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host) return 0; } +static void sata_fsl_host_stop(struct ata_host *host) +{ + struct sata_fsl_host_priv *host_priv = host->private_data; + + iounmap(host_priv->hcr_base); + kfree(host_priv); +} + /* * scsi mid-layer and libata interface structures */ @@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = { .port_start = sata_fsl_port_start, .port_stop = sata_fsl_port_stop, + .host_stop = sata_fsl_host_stop, + .pmp_attach = sata_fsl_pmp_attach, .pmp_detach = sata_fsl_pmp_detach, }; @@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev) host_priv->ssr_base = ssr_base; host_priv->csr_base = csr_base; - irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); - if (!irq) { - dev_err(&ofdev->dev, "invalid irq from platform\n"); + irq = platform_get_irq(ofdev, 0); + if (irq < 0) { + retval = irq; goto error_exit_with_cleanup; } host_priv->irq = irq; @@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev) ata_host_detach(host); - irq_dispose_mapping(host_priv->irq); - iounmap(host_priv->hcr_base); - kfree(host_priv); - return 0; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index a154cab6cd98..c3a36cfaa855 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx) int ret; if (idx < 0) { - pr_warn("deleting an unspecified loop device is not supported.\n"); + pr_warn_once("deleting an unspecified loop device is not supported.\n"); return -EINVAL; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 97bf051a50ce..6ae38776e30e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req = bd->rq; struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; - unsigned int num; + int num; int qid = hctx->queue_num; bool notify = false; blk_status_t status; @@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = { .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 08d7953ec5f1..25071126995b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = { .owner = THIS_MODULE }; +#ifdef CONFIG_ZRAM_WRITEBACK static const struct block_device_operations zram_wb_devops = { .open = zram_open, .submit_bio = zram_submit_bio, .swap_slot_free_notify = 
zram_slot_free_notify, .owner = THIS_MODULE }; +#endif static DEVICE_ATTR_WO(compact); static DEVICE_ATTR_RW(disksize); diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index ed3c4c42fc23..d68d05d5d383 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs) return 0; } -static int +static int __init lba_find_capability(int cap) { struct _parisc_agp_info *info = &parisc_agp_info; @@ -366,7 +366,7 @@ fail: return error; } -static int +static int __init find_quicksilver(struct device *dev, void *data) { struct parisc_device **lba = data; @@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data) return 0; } -static int +static int __init parisc_agp_init(void) { extern struct sba_device *sba_list; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index deed355422f4..c837d5416e0e 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -191,6 +191,8 @@ struct ipmi_user { struct work_struct remove_work; }; +static struct workqueue_struct *remove_work_wq; + static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) __acquires(user->release_barrier) { @@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref) struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); /* SRCU cleanup must happen in task context. */ - schedule_work(&user->remove_work); + queue_work(remove_work_wq, &user->remove_work); } static void _ipmi_destroy_user(struct ipmi_user *user) @@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, /* We didn't find a user, deliver an error response. */ ipmi_inc_stat(intf, unhandled_commands); - msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3); - msg->data[1] = msg->rsp[2]; - msg->data[2] = msg->rsp[4] & ~0x3; + msg->data[0] = (netfn + 1) << 2; + msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ + msg->data[1] = msg->rsp[1]; /* Addr */ + msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ + msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ msg->data[3] = cmd; msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; msg->data_size = 5; @@ -4455,13 +4459,24 @@ return_unspecified: msg->rsp[2] = IPMI_ERR_UNSPECIFIED; msg->rsp_size = 3; } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { - /* commands must have at least 3 bytes, responses 4. */ - if (is_cmd && (msg->rsp_size < 3)) { + /* commands must have at least 4 bytes, responses 5. */ + if (is_cmd && (msg->rsp_size < 4)) { ipmi_inc_stat(intf, invalid_commands); goto out; } - if (!is_cmd && (msg->rsp_size < 4)) - goto return_unspecified; + if (!is_cmd && (msg->rsp_size < 5)) { + ipmi_inc_stat(intf, invalid_ipmb_responses); + /* Construct a valid error response. 
*/ + msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */ + msg->rsp[0] |= (1 << 2); /* Make it a response */ + msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */ + msg->rsp[1] = msg->data[1]; /* Addr */ + msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */ + msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */ + msg->rsp[3] = msg->data[3]; /* Cmd */ + msg->rsp[4] = IPMI_ERR_UNSPECIFIED; + msg->rsp_size = 5; + } } else if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) @@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) if (rv) { rv->done = free_smi_msg; rv->user_data = NULL; + rv->type = IPMI_SMI_MSG_TYPE_NORMAL; atomic_inc(&smi_msg_inuse_count); } return rv; @@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void) atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); + if (!remove_work_wq) { + pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); + rv = -ENOMEM; + goto out; + } + initialized = true; out: @@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void) int count; if (initialized) { + destroy_workqueue(remove_work_wq); + atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index e338d2f010fe..096c3848fa41 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = { .release = cpufreq_sysfs_release, }; -static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu) +static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu, + struct device *dev) { - struct device *dev = get_cpu_device(cpu); - if (unlikely(!dev)) return; @@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) if (policy->max_freq_req) { /* - * CPUFREQ_CREATE_POLICY notification is sent only after - * successfully adding max_freq_req request. + * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY + * notification, since CPUFREQ_CREATE_POLICY notification was + * sent after adding max_freq_req earlier. 
*/ blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_REMOVE_POLICY, policy); @@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu) if (new_policy) { for_each_cpu(j, policy->related_cpus) { per_cpu(cpufreq_cpu_data, j) = policy; - add_cpu_dev_symlink(policy, j); + add_cpu_dev_symlink(policy, j, get_cpu_device(j)); } policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req), @@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) /* Create sysfs link on CPU registration */ policy = per_cpu(cpufreq_cpu_data, cpu); if (policy) - add_cpu_dev_symlink(policy, cpu); + add_cpu_dev_symlink(policy, cpu, dev); return 0; } diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index f57a39ddd063..ab7fd896d2c4 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf) int i; table = &buffer->sg_table; - for_each_sg(table->sgl, sg, table->nents, i) { + for_each_sgtable_sg(table, sg, i) { struct page *page = sg_page(sg); __free_pages(page, compound_order(page)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 71a6a9ef54ac..6348559608ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct drm_gem_object *gobj; + struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1506,14 +1506,16 @@ allocate_init_user_pages_failed: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: - drm_gem_object_put(gobj); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: unreserve_mem_limit(adev, size, alloc_domain, !!sg); err_reserve_limit: mutex_destroy(&(*mem)->lock); - kfree(*mem); + if (gobj) + drm_gem_object_put(gobj); + else + kfree(*mem); err: if (sg) { sg_free_table(sg); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d94fa748e6bb..1e651b959141 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ - if (!amdgpu_device_has_dc_support(adev)) + if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) drm_helper_force_disable_all(adev_to_drm(adev)); else drm_atomic_helper_shutdown(adev_to_drm(adev)); @@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, { int r; + amdgpu_amdkfd_pre_reset(adev); + if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else @@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); + amdgpu_amdkfd_post_reset(adev); error: if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { @@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, cancel_delayed_work_sync(&tmp_adev->delayed_init_work); - amdgpu_amdkfd_pre_reset(tmp_adev); + if (!amdgpu_sriov_vf(tmp_adev)) + 
amdgpu_amdkfd_pre_reset(tmp_adev); /* * Mark these ASICs to be reset as untracked first @@ -5129,7 +5133,7 @@ skip_hw_reset: drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); } - if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { + if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); } @@ -5148,9 +5152,9 @@ skip_hw_reset: skip_sched_resume: list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - /* unlock kfd */ - if (!need_emergency_restart) - amdgpu_amdkfd_post_reset(tmp_adev); + /* unlock kfd: SRIOV would do it separately */ + if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_post_reset(tmp_adev); /* kfd_post_reset will do nothing if kfd device is not initialized, * need to bring up kfd here if it wasn't initialized before diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 503995c7ff6c..ea00090b3fb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = { [HDP_HWIP] = HDP_HWID, [SDMA0_HWIP] = SDMA0_HWID, [SDMA1_HWIP] = SDMA1_HWID, + [SDMA2_HWIP] = SDMA2_HWID, + [SDMA3_HWIP] = SDMA3_HWID, [MMHUB_HWIP] = MMHUB_HWID, [ATHUB_HWIP] = ATHUB_HWID, [NBIO_HWIP] = NBIF_HWID, @@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(3, 0, 64): case IP_VERSION(3, 1, 1): case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 192): amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4f7c70845785..585961c2f5f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) break; case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) fw_name = FIRMWARE_SIENNA_CICHLID; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ce982afeff91..ac9a8cd21c4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle) int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) - if (adev->mode_info.crtcs[i]) - hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); + if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function) + hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer); kfree(adev->mode_info.bios_hardcoded_edid); kfree(adev->amdgpu_vkms_output); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 34478bcc4d09..b305fd39874f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); - /* Skip suspend with A+A reset */ - if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { - dev_dbg(adev->dev, "Device in reset.
Skipping RLC halt\n"); + /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */ + if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) || + (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) { + dev_dbg(adev->dev, "Skipping RLC halt\n"); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index a6659d9ecdd2..2ec1ffb36b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (amdgpu_sriov_vf(adev)) { if (encode) *codecs = &sriov_sc_video_codecs_encode; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 58b89b53ebe6..3cb4681c5f53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1574,7 +1574,6 @@ retry_flush_work: static void svm_range_restore_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct amdkfd_process_info *process_info; struct svm_range_list *svms; struct svm_range *prange; struct kfd_process *p; @@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work) * the lifetime of this thread, kfd_process and mm will be valid. */ p = container_of(svms, struct kfd_process, svms); - process_info = p->kgd_process_info; mm = p->mm; if (!mm) return; - mutex_lock(&process_info->lock); svm_range_list_lock_and_flush_work(svms, mm); mutex_lock(&svms->lock); @@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work) out_reschedule: mutex_unlock(&svms->lock); mmap_write_unlock(mm); - mutex_unlock(&process_info->lock); /* If validation failed, reschedule another attempt */ if (evicted_ranges) { @@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, if (atomic_read(&svms->drain_pagefaults)) { pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + r = 0; goto out; } @@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, mm = get_task_mm(p->lead_thread); if (!mm) { pr_debug("svms 0x%p failed to get mm\n", svms); + r = 0; goto out; } @@ -2660,6 +2658,7 @@ retry_write_locked: if (svm_range_skip_recover(prange)) { amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + r = 0; goto out_unlock_range; } @@ -2668,6 +2667,7 @@ retry_write_locked: if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) { pr_debug("svms 0x%p [0x%lx %lx] already restored\n", svms, prange->start, prange->last); + r = 0; goto out_unlock_range; } @@ -3177,7 +3177,6 @@ static int svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) { - struct amdkfd_process_info *process_info = p->kgd_process_info; struct mm_struct *mm = current->mm; struct list_head update_list; struct list_head insert_list; @@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svms = &p->svms; - mutex_lock(&process_info->lock); - svm_range_list_lock_and_flush_work(svms, mm); r = svm_range_is_valid(p, start, size); @@ -3273,8 +3270,6 @@ out_unlock_range: mutex_unlock(&svms->lock); mmap_read_unlock(mm); out: - mutex_unlock(&process_info->lock); - pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, &p->svms, start, start + size - 1, r); diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index cce062adc439..8a441a22c46e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) ret = -EINVAL; goto cleanup; } + + if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) && + (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) { + DRM_DEBUG_DRIVER("No DP connector available for CRC source\n"); + ret = -EINVAL; + goto cleanup; + } + } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 32a5ce09a62a..cc34a35d0bcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,6 +36,8 @@ #include "dm_helpers.h" #include "dc_link_ddc.h" +#include "ddc_service_types.h" +#include "dpcd_defs.h" #include "i2caux_interface.h" #include "dmub_cmd.h" @@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { }; #if defined(CONFIG_DRM_AMD_DC_DCN) +static bool needs_dsc_aux_workaround(struct dc_link *link) +{ + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && + link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) + return true; + + return false; +} + static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; @@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connecto u8 *dsc_branch_dec_caps = NULL; aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); -#if defined(CONFIG_HP_HOOK_WORKAROUND) + /* * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs * because it only checks the dsc/fec caps of the "port variable" and not the dock @@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connecto * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux * */ - - if (!aconnector->dsc_aux && !port->parent->port_parent) + if (!aconnector->dsc_aux && !port->parent->port_parent && + needs_dsc_aux_workaround(aconnector->dc_link)) aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; -#endif + if (!aconnector->dsc_aux) return false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 60544788e911..c8457babfdea 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link, dal_ddc_service_set_transaction_type(link->ddc, sink_caps->transaction_type); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Apply workaround for tunneled MST on certain USB4 docks. Always use DSC if dock + * reports DSC support.
+ */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +#endif + #if defined(CONFIG_DRM_AMD_DC_HDCP) /* In case of fallback to SST when topology discovery below fails * HDCP caps will be queried again later by the upper layer (caller @@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link, LINK_INFO("link=%d, mst branch is now Disconnected\n", link->link_index); + /* Disable workaround which keeps DSC on for tunneled MST on certain USB4 docks. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; + dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); link->mst_stream_alloc_table.stream_count = 0; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cb7bf9148904..13bc69d6b679 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( } for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0; + lt_settings->dpcd_lane_settings[lane].raw = 0; } if (status == LINK_TRAINING_SUCCESS) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c32fdccd4d92..e2d9a46d0e1a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; + // Only audio is left to check whether it is the same or not. This is a corner case for tiled sinks + if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + return false; + return true; } @@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state( if (!new_ctx) return DC_ERROR_UNEXPECTED; -#if defined(CONFIG_DRM_AMD_DC_DCN) - - /* - * Update link encoder to stream assignment. - * TODO: Split out reason allocation from validation. - */ - if (dc->res_pool->funcs->link_encs_assign && fast_validate == false) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); -#endif if (dc->res_pool->funcs->validate_global) { result = dc->res_pool->funcs->validate_global(dc, new_ctx); @@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* + * Only update link encoder to stream assignment after bandwidth validation passed. + * TODO: Split out assignment and validation.
+ */ + if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) + dc->res_pool->funcs->link_encs_assign( + dc, new_ctx, new_ctx->streams, new_ctx->stream_count); +#endif + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3aac3f4a2852..618e7989176f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -508,7 +508,8 @@ union dpia_debug_options { uint32_t disable_dpia:1; uint32_t force_non_lttpr:1; uint32_t extend_aux_rd_interval:1; - uint32_t reserved:29; + uint32_t disable_mst_dsc_work_around:1; + uint32_t reserved:28; } bits; uint32_t raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 180ecd860296..b01077a6af0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -191,6 +191,8 @@ struct dc_link { bool dp_skip_DID2; bool dp_skip_reset_segment; bool dp_mot_reset_segment; + /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ + bool dpia_mst_dsc_always_on; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 01168b8955bf..8a3244585d80 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 7b9f69f21f1e..bca0de92802e 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -9,6 +9,7 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/module.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 39e11eaec1a3..aa7238245b0e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1640,6 +1640,9 @@ struct intel_dp { struct intel_dp_pcon_frl frl; struct intel_psr psr; + + /* When we last wrote the OUI for eDP */ + unsigned long last_oui_write; }; enum lspcon_vendor { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index be883469d2fc..a552f05a67e5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -29,6 +29,7 @@ #include <linux/i2c.h> #include <linux/notifier.h> #include <linux/slab.h> +#include <linux/timekeeping.h> #include <linux/types.h> #include <asm/byteorder.h> @@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) drm_err(&i915->drm, "Failed to write source OUI\n"); + + intel_dp->last_oui_write = jiffies; +} + +void intel_dp_wait_source_oui(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); + wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); } /* If the device supports 
it, try to set the power state appropriately */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index ce229026dc91..b64145a3869a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_dp_phy_test(struct intel_encoder *encoder); +void intel_dp_wait_source_oui(struct intel_dp *intel_dp); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 569d17b4d00f..3897468140e0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -36,6 +36,7 @@ #include "intel_backlight.h" #include "intel_display_types.h" +#include "intel_dp.h" #include "intel_dp_aux_backlight.h" /* TODO: @@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) int ret; u8 tcon_cap[4]; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap)); if (ret != sizeof(tcon_cap)) return false; @@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, int ret; u8 old_ctrl, ctrl; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); @@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + if (!panel->backlight.edp.vesa.info.aux_enable) { + u32 pwm_level = intel_backlight_invert_pwm_level(connector, + panel->backlight.pwm_level_max); + + panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level); + } + drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } @@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info); + + if (!panel->backlight.edp.vesa.info.aux_enable) + panel->backlight.pwm_funcs->disable(old_conn_state, + intel_backlight_invert_pwm_level(connector, 0)); } static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe) @@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, if (ret < 0) return ret; + if (!panel->backlight.edp.vesa.info.aux_enable) { + ret = panel->backlight.pwm_funcs->setup(connector, pipe); + if (ret < 0) { + drm_err(&i915->drm, + "Failed to setup PWM backlight controls for eDP backlight: %d\n", + ret); + return ret; + } + } panel->backlight.max = panel->backlight.edp.vesa.info.max; panel->backlight.min = 0; if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { @@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); - /* TODO: We currently only support AUX only backlight configurations, not backlights which - * require a mix of PWM and AUX controls to work. 
In the mean time, these machines typically - * work just fine using normal PWM controls anyway. - */ - if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && - drm_edp_backlight_supported(intel_dp->edp_dpcd)) { + if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); return true; } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 67d14afa6623..b67f620c3d93 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -6,6 +6,7 @@ #include <linux/slab.h> /* fault-inject.h is not standalone! */ #include <linux/fault-inject.h> +#include <linux/sched/mm.h> #include "gem/i915_gem_lmem.h" #include "i915_trace.h" diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index e1f362530889..ed73d9bc9d40 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0, false); - - /* - * Wa_14012131227:dg1 - * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p - */ - wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1, - GEN9_RHWO_OPTIMIZATION_DISABLE); } static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 820a1f38b271..89cccefeea63 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -29,6 +29,7 @@ #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include "gem/i915_gem_context.h" #include "gt/intel_breadcrumbs.h" diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 65fdca366e41..f74f8048af8f 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -4,6 +4,7 @@ #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/clk.h> +#include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index ae11061727ff..39197b4beea7 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -4,8 +4,8 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST + depends on COMMON_CLK depends on IOMMU_SUPPORT - depends on (OF && COMMON_CLK) || COMPILE_TEST depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 40577f8856d8..093454457545 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -23,8 +23,10 @@ msm-y := \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ + hdmi/hdmi_phy_8996.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ + hdmi/hdmi_pll_8960.o \ edp/edp.o \ edp/edp_aux.o \ edp/edp_bridge.o \ @@ -37,6 +39,7 @@ msm-y := \ disp/mdp4/mdp4_dtv_encoder.o \ disp/mdp4/mdp4_lcdc_encoder.o \ disp/mdp4/mdp4_lvds_connector.o \ + disp/mdp4/mdp4_lvds_pll.o \ disp/mdp4/mdp4_irq.o \ disp/mdp4/mdp4_kms.o \ disp/mdp4/mdp4_plane.o \ @@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ dp/dp_audio.o msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o -msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o 
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o -msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 267a880811d6..78aad5216a61 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; - u32 gpu_scid, cntl1_regval = 0; + u32 cntl1_regval = 0; if (IS_ERR(a6xx_gpu->llc_mmio)) return; if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { - gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); + u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); gpu_scid &= 0x1f; cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | (gpu_scid << 15) | (gpu_scid << 20); + + /* On A660, the SCID programming for UCHE traffic is done in + * A6XX_GBIF_SCACHE_CNTL0[14:10] + */ + if (adreno_is_a660_family(adreno_gpu)) + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | + (1 << 8), (gpu_scid << 10) | (1 << 8)); } /* @@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) } gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); - - /* On A660, the SCID programming for UCHE traffic is done in - * A6XX_GBIF_SCACHE_CNTL0[14:10] - */ - if (adreno_is_a660_family(adreno_gpu)) - gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | - (1 << 8), (gpu_scid << 10) | (1 << 8)); } static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) @@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } -void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) +static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 7501849ed15d..6e90209cd543 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); a6xx_state->gmu_registers = state_kcalloc(a6xx_state, - 2, sizeof(*a6xx_state->gmu_registers)); + 3, sizeof(*a6xx_state->gmu_registers)); if (!a6xx_state->gmu_registers) return; - a6xx_state->nr_gmu_registers = 2; + a6xx_state->nr_gmu_registers = 3; /* Get the CX GMU registers from AHB */ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0], diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index eb40d8413bca..6d36f63c3338 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -33,6 +33,7 @@ struct dp_aux_private { bool read; bool no_send_addr; bool no_send_stop; + bool initted; u32 offset; u32 segment; @@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } mutex_lock(&aux->mutex); + if (!aux->initted) { + ret = -EIO; + goto exit; + } dp_aux_update_offset_and_segment(aux, msg); dp_aux_transfer_helper(aux, msg, true); @@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } aux->cmd_busy = false; + +exit: mutex_unlock(&aux->mutex); return ret; @@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct 
dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + dp_catalog_aux_enable(aux->catalog, true); aux->retry_cnt = 0; + aux->initted = true; + + mutex_unlock(&aux->mutex); } void dp_aux_deinit(struct drm_dp_aux *dp_aux) @@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + + aux->initted = false; dp_catalog_aux_enable(aux->catalog, false); + + mutex_unlock(&aux->mutex); } int dp_aux_register(struct drm_dp_aux *dp_aux) diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f69a125f9559..0afc3b756f92 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, if (!prop) { DRM_DEV_DEBUG(dev, "failed to find data lane mapping, using default\n"); + /* Set the number of data lanes to 4 by default. */ + msm_host->num_data_lanes = 4; return 0; } diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 09d2d279c30a..dee13fedee3b 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) goto free_priv; pm_runtime_get_sync(&gpu->pdev->dev); + msm_gpu_hw_init(gpu); show_priv->state = gpu->funcs->gpu_state_get(gpu); pm_runtime_put_sync(&gpu->pdev->dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 7936e8d498dd..892c04365239 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, return ret; } -static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, - struct drm_file *file) +static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id, + ktime_t timeout) { - struct msm_drm_private *priv = dev->dev_private; - struct drm_msm_wait_fence *args = data; - ktime_t timeout = to_ktime(args->timeout); - struct msm_gpu_submitqueue *queue; - struct msm_gpu *gpu = priv->gpu; struct dma_fence *fence; int ret; - if (args->pad) { - DRM_ERROR("invalid pad: %08x\n", args->pad); + if (fence_id > queue->last_fence) { + DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n", + fence_id, queue->last_fence); return -EINVAL; } - if (!gpu) - return 0; - - queue = msm_submitqueue_get(file->driver_priv, args->queueid); - if (!queue) - return -ENOENT; - /* * Map submitqueue scoped "seqno" (which is actually an idr key) * back to underlying dma-fence @@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, ret = mutex_lock_interruptible(&queue->lock); if (ret) return ret; - fence = idr_find(&queue->fence_idr, args->fence); + fence = idr_find(&queue->fence_idr, fence_id); if (fence) fence = dma_fence_get_rcu(fence); mutex_unlock(&queue->lock); @@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, } dma_fence_put(fence); + + return ret; +} + +static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_wait_fence *args = data; + struct msm_gpu_submitqueue *queue; + int ret; + + if (args->pad) { + DRM_ERROR("invalid pad: %08x\n", args->pad); + return -EINVAL; + } + + if (!priv->gpu) + return 0; + + queue = msm_submitqueue_get(file->driver_priv, args->queueid); + if (!queue) + return -ENOENT; + + ret
= wait_fence(queue, args->fence, to_ktime(args->timeout)); + msm_submitqueue_put(queue); return ret; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 104fdfc14027..512d55eecbaf 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct { struct msm_gem_object *msm_obj = to_msm_bo(obj); - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); return 0; @@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev, break; fallthrough; default: - DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", + DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n", (flags & MSM_BO_CACHE_MASK)); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 4a1420b05e97..086dacf2f26a 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -5,6 +5,7 @@ */ #include <linux/vmalloc.h> +#include <linux/sched/mm.h> #include "msm_drv.h" #include "msm_gem.h" diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3cb029f10925..282628d6b72c 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->nr_cmds); if (IS_ERR(submit)) { ret = PTR_ERR(submit); + submit = NULL; goto out_unlock; } @@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, drm_sched_entity_push_job(&submit->base); args->fence = submit->fence_id; + queue->last_fence = submit->fence_id; msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); msm_process_post_deps(post_deps, args->nr_out_syncobjs, diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 59cdd00b69d0..48ea2de911f1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, * @ring_nr: the ringbuffer used by this submitqueue, which is determined * by the submitqueue's priority * @faults: the number of GPU hangs associated with this submitqueue + * @last_fence: the sequence number of the last allocated fence (for error + * checking) * @ctx: the per-drm_file context associated with the submitqueue (ie. 
* which set of pgtables do submits jobs associated with the * submitqueue use) @@ -374,6 +376,7 @@ struct msm_gpu_submitqueue { u32 flags; u32 ring_nr; int faults; + uint32_t last_fence; struct msm_file_private *ctx; struct list_head node; struct idr fence_idr; diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 8b7473f69cb8..384e90c4b2a7 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, struct msm_gpu *gpu = dev_to_gpu(dev); struct dev_pm_opp *opp; + /* + * Note that devfreq_recommended_opp() can modify the freq + * to something that actually is in the opp table: + */ opp = devfreq_recommended_opp(dev, freq, flags); /* @@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, */ if (gpu->devfreq.idle_freq) { gpu->devfreq.idle_freq = *freq; + dev_pm_opp_put(opp); return 0; } @@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work) struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); unsigned long idle_freq, target_freq = 0; - if (!df->devfreq) - return; - /* * Hold devfreq lock to synchronize with get_dev_status()/ * target() callbacks @@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; + if (!df->devfreq) + return; + msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1), - HRTIMER_MODE_ABS); + HRTIMER_MODE_REL); } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7e83c00a3f48..79c870a3bef8 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -34,6 +34,7 @@ #include <linux/sched.h> #include <linux/shmem_fs.h> #include <linux/file.h> +#include <linux/module.h> #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index f0b3e4cf5bce..b61792d2aa65 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; - struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; struct drm_crtc *crtc; struct vc4_hvs_state *old_hvs_state; + unsigned int channel; int i; for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { @@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); } - if (vc4->hvs->hvs5) - clk_set_min_rate(hvs->core_clk, 500000000); - old_hvs_state = vc4_hvs_get_old_global_state(state); - if (!old_hvs_state) + if (IS_ERR(old_hvs_state)) return; - for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { - struct vc4_crtc_state *vc4_crtc_state = - to_vc4_crtc_state(old_crtc_state); - unsigned int channel = vc4_crtc_state->assigned_channel; + for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) { + struct drm_crtc_commit *commit; int ret; - if (channel == VC4_HVS_CHANNEL_DISABLED) + if (!old_hvs_state->fifo_state[channel].in_use) continue; - if (!old_hvs_state->fifo_state[channel].in_use) + commit = old_hvs_state->fifo_state[channel].pending_commit; + if (!commit) continue; - ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit); + ret = drm_crtc_commit_wait(commit); if (ret) drm_err(dev, "Timed out 
waiting for commit\n"); + + drm_crtc_commit_put(commit); + old_hvs_state->fifo_state[channel].pending_commit = NULL; } + if (vc4->hvs->hvs5) + clk_set_min_rate(hvs->core_clk, 500000000); + drm_atomic_helper_commit_modeset_disables(dev, state); vc4_ctm_commit(vc4, state); @@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state) unsigned int i; hvs_state = vc4_hvs_get_new_global_state(state); - if (!hvs_state) - return -EINVAL; + if (WARN_ON(IS_ERR(hvs_state))) + return PTR_ERR(hvs_state); for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct vc4_crtc_state *vc4_crtc_state = @@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj) for (i = 0; i < HVS_NUM_CHANNELS; i++) { state->fifo_state[i].in_use = old_state->fifo_state[i].in_use; - - if (!old_state->fifo_state[i].pending_commit) - continue; - - state->fifo_state[i].pending_commit = - drm_crtc_commit_get(old_state->fifo_state[i].pending_commit); } return &state->base; @@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, unsigned int i; hvs_new_state = vc4_hvs_get_global_state(state); - if (!hvs_new_state) - return -EINVAL; + if (IS_ERR(hvs_new_state)) + return PTR_ERR(hvs_new_state); for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++) if (!hvs_new_state->fifo_state[i].in_use) diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index d86e1ad4a972..5072dbb0669a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev) schedule_work(&vgdev->config_changed_work); } -static __poll_t virtio_gpu_poll(struct file *filp, - struct poll_table_struct *wait) -{ - struct drm_file *drm_file = filp->private_data; - struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; - struct drm_device *dev = drm_file->minor->dev; - struct virtio_gpu_device *vgdev = dev->dev_private; - struct drm_pending_event *e = NULL; - __poll_t mask = 0; - - if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask) - return drm_poll(filp, wait); - - poll_wait(filp, &drm_file->event_wait, wait); - - if (!list_empty(&drm_file->event_list)) { - spin_lock_irq(&dev->event_lock); - e = list_first_entry(&drm_file->event_list, - struct drm_pending_event, link); - drm_file->event_space += e->event->length; - list_del(&e->link); - spin_unlock_irq(&dev->event_lock); - - kfree(e); - mask |= EPOLLIN | EPOLLRDNORM; - } - - return mask; -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Alon Levy"); -static const struct file_operations virtio_gpu_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .poll = virtio_gpu_poll, - .read = drm_read, - .llseek = noop_llseek, - .mmap = drm_gem_mmap -}; +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index e0265fe74aa5..0a194aaad419 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver { spinlock_t lock; }; -#define 
VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000 struct virtio_gpu_fence_event { struct drm_pending_event base; struct drm_event event; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5618a1d5879c..3607646d3229 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev, if (!e) return -ENOMEM; - e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL; + e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED; e->event.length = sizeof(e->event); ret = drm_event_reserve_init(dev, file, &e->base, &e->event); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 9f5435b55949..a7c78ac96270 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -207,14 +207,14 @@ config HID_CHERRY config HID_CHICONY tristate "Chicony devices" - depends on HID + depends on USB_HID default !EXPERT help Support for Chicony Tactical pad and special keys on Chicony keyboards. config HID_CORSAIR tristate "Corsair devices" - depends on HID && USB && LEDS_CLASS + depends on USB_HID && LEDS_CLASS help Support for Corsair devices that are not fully compliant with the HID standard. @@ -245,7 +245,7 @@ config HID_MACALLY config HID_PRODIKEYS tristate "Prodikeys PC-MIDI Keyboard support" - depends on HID && SND + depends on USB_HID && SND select SND_RAWMIDI help Support for Prodikeys PC-MIDI Keyboard device support. @@ -560,7 +560,7 @@ config HID_LENOVO config HID_LOGITECH tristate "Logitech devices" - depends on HID + depends on USB_HID depends on LEDS_CLASS default !EXPERT help @@ -951,7 +951,7 @@ config HID_SAITEK config HID_SAMSUNG tristate "Samsung InfraRed remote control or keyboards" - depends on HID + depends on USB_HID help Support for Samsung InfraRed remote control or keyboards. 
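A note on the HID driver fixes that follow: each one guards a probe or transfer path that previously assumed hdev->dev.parent is a USB interface, which does not hold when the same device ID arrives over uhid or another non-USB transport. Together with the Kconfig tightening above (HID dependencies becoming USB_HID), the recurring shape is roughly the sketch below. The driver and its interface-number gate are hypothetical; hid_is_usb(), hid_parse() and hid_hw_start() are the kernel helpers these patches actually use.

#include <linux/hid.h>
#include <linux/usb.h>

static int example_probe(struct hid_device *hdev,
			 const struct hid_device_id *id)
{
	struct usb_interface *intf;

	/* Reject non-USB transports before any USB-specific cast:
	 * to_usb_interface(hdev->dev.parent) is only valid under usbhid. */
	if (!hid_is_usb(hdev))
		return -EINVAL;

	intf = to_usb_interface(hdev->dev.parent);

	/* Hypothetical interface-number gate, mirroring drivers that only
	 * bind to one interface of a multi-interface device. */
	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
		return -ENODEV;

	return hid_parse(hdev) ?: hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

The same check-before-cast ordering is what the asus, corsair, holtek, lg, prodikeys, sony, thrustmaster and wacom changes below establish, each adapted to where that driver first touches the USB side.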
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index f3ecddc519ee..08c9a9a60ae4 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) if (drvdata->quirks & QUIRK_IS_MULTITOUCH) drvdata->tp = &asus_i2c_tp; - if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && - hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) { @@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) drvdata->tp = &asus_t100chi_tp; } - if ((drvdata->quirks & QUIRK_MEDION_E1239T) && - hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) { struct usb_host_interface *alt = to_usb_interface(hdev->dev.parent)->altsetting; diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index db6da21ade06..74ad8bf98bfd 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work) struct bigben_device, worker); struct hid_field *report_field = bigben->report->field[0]; - if (bigben->removed) + if (bigben->removed || !report_field) return; if (bigben->work_led) { diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index ca556d39da2a..f04d2aa23efe 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + hdev->quirks |= HID_QUIRK_INPUT_PER_APP; ret = hid_parse(hdev); if (ret) { diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 902a60e249ed..8c895c820b67 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id) int ret; unsigned long quirks = id->driver_data; struct corsair_drvdata *drvdata; - struct usb_interface *usbif = to_usb_interface(dev->dev.parent); + struct usb_interface *usbif; + + if (!hid_is_usb(dev)) + return -EINVAL; + + usbif = to_usb_interface(dev->dev.parent); drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata), GFP_KERNEL); diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 021049805bb7..3091355d48df 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -50,7 +50,7 @@ struct elan_drvdata { static int is_not_elan_touchpad(struct hid_device *hdev) { - if (hdev->bus == BUS_USB) { + if (hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); return (intf->altsetting->desc.bInterfaceNumber != diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 383dfda8c12f..8e960d7b233b 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) int ret; struct usb_device *udev; + if (!hid_is_usb(hdev)) + return -EINVAL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c index 8ee77f4afe9f..79505c64dbfe 100644 --- a/drivers/hid/hid-ft260.c +++ b/drivers/hid/hid-ft260.c @@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct 
hid_device_id *id) struct ft260_get_chip_version_report version; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 8123b871a3eb..0403beb3104b 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 0a38e8e9bc78..403506b9697e 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, static int holtek_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - int ret = hid_parse(hdev); + struct usb_interface *intf; + int ret; + + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (!ret) ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + intf = to_usb_interface(hdev->dev.parent); if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) { struct hid_input *hidinput; list_for_each_entry(hidinput, &hdev->inputs, list) { diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index 195b735b001d..b7172c48ef9f 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c @@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } +static int holtek_mouse_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + if (!hid_is_usb(hdev)) + return -EINVAL; + return 0; +} + static const struct hid_device_id holtek_mouse_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, @@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = { .name = "holtek_mouse", .id_table = holtek_mouse_devices, .report_fixup = holtek_mouse_report_fixup, + .probe = holtek_mouse_probe, }; module_hid_driver(holtek_mouse_driver); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 96a455921c67..19da07777d62 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -399,6 +399,7 @@ #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 #define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817 +#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A @@ -501,6 +502,7 @@ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 #define USB_DEVICE_ID_GOOGLE_DON 0x5050 +#define USB_DEVICE_ID_GOOGLE_EEL 0x5057 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f @@ -886,6 +888,7 @@ #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 #define USB_DEVICE_ID_MS_POWER_COVER 0x07da +#define USB_DEVICE_ID_MS_SURFACE3_COVER 0x07de #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd #define USB_DEVICE_ID_MS_PIXART_MOUSE 
0x00cb #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 217f2d1b91c5..03f994541981 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -325,6 +325,8 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15), HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15), diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index d40af911df63..fb3f7258009c 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *iface; + __u8 iface_num; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + + iface = to_usb_interface(hdev->dev.parent); + iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + /* G29 only works with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index a0017b010c34..7106b921b53c 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev, case recvr_type_bluetooth: no_dj_interfaces = 2; break; case recvr_type_dinovo: no_dj_interfaces = 2; break; } - if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if (hid_is_usb(hdev)) { intf = to_usb_interface(hdev->dev.parent); if (intf && intf->altsetting->desc.bInterfaceNumber >= no_dj_interfaces) { diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 2666af02d5c1..e4e9471d0f1e 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *intf; + unsigned short ifnum; unsigned long quirks = id->driver_data; struct pk_device *pk; struct pcmidi_snd *pm = NULL; + if (!hid_is_usb(hdev)) + return -EINVAL; + + intf = to_usb_interface(hdev->dev.parent); + ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + pk = kzalloc(sizeof(*pk), GFP_KERNEL); if (pk == NULL) { hid_err(hdev, "can't alloc descriptor\n"); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 06b7908c874c..ee7e504e7279 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c index 4556d2a50f75..d94ee0539421 100644 --- a/drivers/hid/hid-roccat-arvo.c +++ b/drivers/hid/hid-roccat-arvo.c @@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c index ce5f22519956..e95d59cd8d07 100644 --- a/drivers/hid/hid-roccat-isku.c +++ b/drivers/hid/hid-roccat-isku.c @@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index ea17abc7ad52..76da04801ca9 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c index 0316edf8c5bb..1896c69ea512 100644 --- a/drivers/hid/hid-roccat-koneplus.c +++ b/drivers/hid/hid-roccat-koneplus.c @@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c index 5248b3c7cf78..cf8eeb33a125 100644 --- a/drivers/hid/hid-roccat-konepure.c +++ b/drivers/hid/hid-roccat-konepure.c @@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 960012881570..6fb9b9563769 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c index 4a88a76d5c62..d5ddf0d68346 100644 --- a/drivers/hid/hid-roccat-lua.c +++ b/drivers/hid/hid-roccat-lua.c @@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 989927defe8d..4fcc8e7d276f 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if 
(!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c index 3956a6c9c521..5bf1971a2b14 100644 --- a/drivers/hid/hid-roccat-ryos.c +++ b/drivers/hid/hid-roccat-ryos.c @@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c index 818701f7a028..a784bb4ee651 100644 --- a/drivers/hid/hid-roccat-savu.c +++ b/drivers/hid/hid-roccat-savu.c @@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c index 2e1c31156eca..cf5992e97094 100644 --- a/drivers/hid/hid-samsung.c +++ b/drivers/hid/hid-samsung.c @@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev, int ret; unsigned int cmask = HID_CONNECT_DEFAULT; + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index d1b107d547f5..60ec2b29d54d 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) sc->quirks = quirks; hid_set_drvdata(hdev, sc); sc->hdev = hdev; - usbdev = to_usb_device(sc->hdev->dev.parent->parent); ret = hid_parse(hdev); if (ret) { @@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) */ if (!(hdev->claimed & HID_CLAIMED_INPUT)) { hid_err(hdev, "failed to claim input\n"); - hid_hw_stop(hdev); - return -ENODEV; + ret = -ENODEV; + goto err; } if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) { + if (!hid_is_usb(hdev)) { + ret = -EINVAL; + goto err; + } + + usbdev = to_usb_device(sc->hdev->dev.parent->parent); + sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!sc->ghl_urb) - return -ENOMEM; + if (!sc->ghl_urb) { + ret = -ENOMEM; + goto err; + } if (sc->quirks & GHL_GUITAR_PS3WIIU) ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data, @@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) ARRAY_SIZE(ghl_ps4_magic_data)); if (ret) { hid_err(hdev, "error preparing URB\n"); - return ret; + goto err; } timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0); @@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) } return ret; + +err: + hid_hw_stop(hdev); + return ret; } static void sony_remove(struct hid_device *hdev) diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index 3a5333424aa3..03b935ff02d5 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -274,6 +274,9 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i int ret = 0; struct tm_wheel *tm_wheel = NULL; + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed with error %d\n", ret); diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c index 31ea7fc69916..ad489caf53ad 100644 --- a/drivers/hid/hid-u2fzero.c +++ b/drivers/hid/hid-u2fzero.c @@ -311,7 +311,7 @@ static int u2fzero_probe(struct hid_device 
*hdev, unsigned int minor; int ret; - if (!hid_is_using_ll_driver(hdev, &usb_hid_driver)) + if (!hid_is_usb(hdev)) return -EINVAL; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index 6a9865dd703c..d8ab0139e5cd 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev, struct uclogic_drvdata *drvdata = NULL; bool params_initialized = false; + if (!hid_is_usb(hdev)) + return -EINVAL; + /* * libinput requires the pad interface to be on a different node * than the pen, so use QUIRK_MULTI_INPUT for all tablets. diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 3d67b748a3b9..adff1bd68d9f 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params, struct uclogic_params p = {0, }; /* Check arguments */ - if (params == NULL || hdev == NULL || - !hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) { rc = -EINVAL; goto cleanup; } diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 1c5039081db2..8e9d9450cb83 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work) if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag && IPC_IS_ISH_ILUP(fwsts)) { - disable_irq_wake(pdev->irq); + if (device_may_wakeup(&pdev->dev)) + disable_irq_wake(pdev->irq); ish_set_host_ready(dev); @@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device) */ pci_save_state(pdev); - enable_irq_wake(pdev->irq); + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(pdev->irq); } } else { /* diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 2717d39600b4..066c567dbaa2 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, * Skip the query for this type and modify defaults based on * interface number. 
*/ - if (features->type == WIRELESS) { + if (features->type == WIRELESS && intf) { if (intf->cur_altsetting->desc.bInterfaceNumber == 0) features->device_type = WACOM_DEVICETYPE_WL_MONITOR; else @@ -2214,7 +2214,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { char *product_name = wacom->hdev->name; - if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) { + if (hid_is_usb(wacom->hdev)) { struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); product_name = dev->product; @@ -2451,6 +2451,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_destroy_battery(wacom); + if (!usbdev) + return; + /* Stylus interface */ hdev1 = usb_get_intfdata(usbdev->config->interface[1]); wacom1 = hid_get_drvdata(hdev1); @@ -2730,8 +2733,6 @@ static void wacom_mode_change_work(struct work_struct *work) static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct usb_device *dev = interface_to_usbdev(intf); struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; @@ -2766,8 +2767,14 @@ static int wacom_probe(struct hid_device *hdev, wacom_wac->hid_data.inputmode = -1; wacom_wac->mode_report = -1; - wacom->usbdev = dev; - wacom->intf = intf; + if (hid_is_usb(hdev)) { + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *dev = interface_to_usbdev(intf); + + wacom->usbdev = dev; + wacom->intf = intf; + } + mutex_init(&wacom->lock); INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work); INIT_WORK(&wacom->wireless_work, wacom_wireless_work); diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c index 72df563477b1..f8639a4457d2 100644 --- a/drivers/i2c/busses/i2c-cbus-gpio.c +++ b/drivers/i2c/busses/i2c-cbus-gpio.c @@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter) } static const struct i2c_algorithm cbus_i2c_algo = { - .smbus_xfer = cbus_i2c_smbus_xfer, - .functionality = cbus_i2c_func, + .smbus_xfer = cbus_i2c_smbus_xfer, + .smbus_xfer_atomic = cbus_i2c_smbus_xfer, + .functionality = cbus_i2c_func, }; static int cbus_i2c_remove(struct platform_device *pdev) diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 819ab4ee517e..02ddb237f69a 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) if (!(ipd & REG_INT_MBRF)) return; - /* ack interrupt */ - i2c_writel(i2c, REG_INT_MBRF, REG_IPD); + /* ack interrupt (read also produces a spurious START flag, clear it too) */ + i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD); /* Can only handle a maximum of 32 bytes at a time */ if (len > 32) diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index b9b19a2a2ffa..66145d2b9b55 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) { struct stm32f7_i2c_dev *i2c_dev = data; struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + struct stm32_i2c_dma *dma = i2c_dev->dma; void __iomem *base = i2c_dev->base; u32 status, mask; int ret = IRQ_HANDLED; @@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) 
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); + if (i2c_dev->use_dma) { + stm32f7_i2c_disable_dma_req(i2c_dev); + dmaengine_terminate_async(dma->chan_using); + } f7_msg->result = -ENXIO; } @@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) /* Clear STOP flag */ writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); - if (i2c_dev->use_dma) { + if (i2c_dev->use_dma && !f7_msg->result) { ret = IRQ_WAKE_THREAD; } else { i2c_dev->master_mode = false; @@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) if (f7_msg->stop) { mask = STM32F7_I2C_CR2_STOP; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); - } else if (i2c_dev->use_dma) { + } else if (i2c_dev->use_dma && !f7_msg->result) { ret = IRQ_WAKE_THREAD; } else if (f7_msg->smbus) { stm32f7_i2c_smbus_rep_start(i2c_dev); @@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) if (!ret) { dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__); stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_async(dma->chan_using); f7_msg->result = -ETIMEDOUT; } @@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) /* Disable dma */ if (i2c_dev->use_dma) { stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_async(dma->chan_using); } i2c_dev->master_mode = false; @@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, time_left = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f7_msg->result; + if (ret) { + if (i2c_dev->use_dma) + dmaengine_synchronize(dma->chan_using); + + /* + * It is possible that some unsent data have already been + * written into TXDR. To avoid sending old data in a + * further transfer, flush TXDR in case of any error + */ + writel_relaxed(STM32F7_I2C_ISR_TXE, + i2c_dev->base + STM32F7_I2C_ISR); + goto pm_free; + } if (!time_left) { dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n", i2c_dev->msg->addr); if (i2c_dev->use_dma) - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_sync(dma->chan_using); + stm32f7_i2c_wait_free_bus(i2c_dev); ret = -ETIMEDOUT; } @@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, timeout = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f7_msg->result; - if (ret) + if (ret) { + if (i2c_dev->use_dma) + dmaengine_synchronize(dma->chan_using); + + /* + * It is possible that some unsent data have already been + * written into TXDR. 
To avoid sending old data in a + * further transfer, flush TXDR in case of any error + */ + writel_relaxed(STM32F7_I2C_ISR_TXE, + i2c_dev->base + STM32F7_I2C_ISR); goto pm_free; + } if (!timeout) { dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr); if (i2c_dev->use_dma) - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_sync(dma->chan_using); + stm32f7_i2c_wait_free_bus(i2c_dev); ret = -ETIMEDOUT; goto pm_free; } diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 13cbeb997cc1..58da08cc3d01 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -929,10 +929,8 @@ static int __init amd_iommu_v2_init(void) { int ret; - pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n"); - if (!amd_iommu_v2_supported()) { - pr_info("AMD IOMMUv2 functionality not available on this system\n"); + pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n"); /* * Load anyway to provide the symbols to other modules * which may use AMD IOMMUv2 optionally. @@ -947,6 +945,8 @@ static int __init amd_iommu_v2_init(void) amd_iommu_register_ppr_notifier(&ppr_nb); + pr_info("AMD IOMMUv2 loaded and initialized\n"); + return 0; out: diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c index b39d223926a4..71596fc62822 100644 --- a/drivers/iommu/intel/cap_audit.c +++ b/drivers/iommu/intel/cap_audit.c @@ -144,6 +144,7 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type) { struct dmar_drhd_unit *d; struct intel_iommu *i; + int rc = 0; rcu_read_lock(); if (list_empty(&dmar_drhd_units)) @@ -169,11 +170,11 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type) */ if (intel_cap_smts_sanity() && !intel_cap_flts_sanity() && !intel_cap_slts_sanity()) - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; out: rcu_read_unlock(); - return 0; + return rc; } int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 0bde0c8b4126..b6a8f3282411 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1339,13 +1339,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pte = &pte[pfn_level_offset(pfn, level)]; do { - unsigned long level_pfn; + unsigned long level_pfn = pfn & level_mask(level); if (!dma_pte_present(pte)) goto next; - level_pfn = pfn & level_mask(level); - /* If range covers entire pagetable, free it */ if (start_pfn <= level_pfn && last_pfn >= level_pfn + level_size(level) - 1) { @@ -1366,7 +1364,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, freelist); } next: - pfn += level_size(level); + pfn = level_pfn + level_size(level); } while (!first_pte_in_page(++pte) && pfn <= last_pfn); if (first_pte) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 5cb260820eda..7f23ad61c094 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte) #define DTE_HI_MASK2 GENMASK(7, 4) #define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */ #define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */ -#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36) -#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36) static inline phys_addr_t rk_dte_pt_address_v2(u32 dte) { diff --git 
a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 7313454e403a..e69c4bf557bf 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -32,7 +32,7 @@ #include <linux/mutex.h> #include <linux/rcupdate.h> -static int showcapimsgs = 0; +static int showcapimsgs; static struct workqueue_struct *kcapi_wq; module_param(showcapimsgs, uint, 0); diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 44bac4ad687c..46aa3554e97b 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -643,6 +643,64 @@ static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev) kfree(cl_vtag); } +void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size) +{ + struct mei_device *bus; + struct mei_cl *cl; + int ret; + + if (!cldev || !buffer_id || !size) + return ERR_PTR(-EINVAL); + + if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) { + dev_err(&cldev->dev, "Map size should be aligned to %lu\n", + MEI_FW_PAGE_SIZE); + return ERR_PTR(-EINVAL); + } + + cl = cldev->cl; + bus = cldev->bus; + + mutex_lock(&bus->device_lock); + if (cl->state == MEI_FILE_UNINITIALIZED) { + ret = mei_cl_link(cl); + if (ret) + goto out; + /* update pointers */ + cl->cldev = cldev; + } + + ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size); +out: + mutex_unlock(&bus->device_lock); + if (ret) + return ERR_PTR(ret); + return cl->dma.vaddr; +} +EXPORT_SYMBOL_GPL(mei_cldev_dma_map); + +int mei_cldev_dma_unmap(struct mei_cl_device *cldev) +{ + struct mei_device *bus; + struct mei_cl *cl; + int ret; + + if (!cldev) + return -EINVAL; + + cl = cldev->cl; + bus = cldev->bus; + + mutex_lock(&bus->device_lock); + ret = mei_cl_dma_unmap(cl, NULL); + + mei_cl_flush_queues(cl, NULL); + mei_cl_unlink(cl); + mutex_unlock(&bus->device_lock); + return ret; +} +EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap); + /** * mei_cldev_enable - enable me client device * create connection with me client @@ -753,9 +811,11 @@ int mei_cldev_disable(struct mei_cl_device *cldev) dev_err(bus->dev, "Could not disconnect from the ME client\n"); out: - /* Flush queues and remove any pending read */ - mei_cl_flush_queues(cl, NULL); - mei_cl_unlink(cl); + /* Flush queues and remove any pending read unless we have mapped DMA */ + if (!cl->dma_mapped) { + mei_cl_flush_queues(cl, NULL); + mei_cl_unlink(cl); + } mutex_unlock(&bus->device_lock); return err; @@ -1052,6 +1112,7 @@ static void mei_cl_bus_dev_release(struct device *dev) if (!cldev) return; + mei_cl_flush_queues(cldev->cl, NULL); mei_me_cl_put(cldev->me_cl); mei_dev_bus_put(cldev->bus); mei_cl_unlink(cldev->cl); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 96f4e59c32a5..0e90591235a6 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -700,6 +700,9 @@ int mei_cl_unlink(struct mei_cl *cl) cl_dbg(dev, cl, "unlink client"); + if (cl->state == MEI_FILE_UNINITIALIZED) + return 0; + if (dev->open_handle_count > 0) dev->open_handle_count--; diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index dfd60c916da0..b46077b17114 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -23,6 +23,11 @@ #define MEI_HBM_TIMEOUT 1 /* 1 second */ /* + * FW page size for DMA allocations + */ +#define MEI_FW_PAGE_SIZE 4096UL + +/* * MEI Version */ #define HBM_MINOR_VERSION 2 diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 9802e265fca8..2b317ed6c103 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -96,6 +96,13 @@ struct dataflash { 
struct mtd_info mtd; }; +static const struct spi_device_id dataflash_dev_ids[] = { + { "at45" }, + { "dataflash" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, dataflash_dev_ids); + #ifdef CONFIG_OF static const struct of_device_id dataflash_dt_ids[] = { { .compatible = "atmel,at45", }, @@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = { .name = "mtd_dataflash", .of_match_table = of_match_ptr(dataflash_dt_ids), }, + .id_table = dataflash_dev_ids, .probe = dataflash_probe, .remove = dataflash_remove, diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 67b7cb67c030..0a45d3c6c15b 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -26,7 +26,7 @@ config MTD_NAND_DENALI_PCI config MTD_NAND_DENALI_DT tristate "Denali NAND controller as a DT device" select MTD_NAND_DENALI - depends on HAS_DMA && HAVE_CLK && OF + depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM help Enable the driver for NAND flash on platforms using a Denali NAND controller as a DT device. diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 658f0cbe7ce8..6b2bda815b88 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -15,6 +15,7 @@ #include <linux/clk.h> #include <linux/completion.h> +#include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-direction.h> #include <linux/dma-mapping.h> @@ -93,6 +94,14 @@ #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) +/* + * According to SPEAr300 Reference Manual (RM0082) + * TOUTDEL = 7ns (Output delay from the flip-flops to the board) + * TINDEL = 5ns (Input delay from the board to the flip-flop) + */ +#define TOUTDEL 7000 +#define TINDEL 5000 + struct fsmc_nand_timings { u8 tclr; u8 tar; @@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, { unsigned long hclk = clk_get_rate(host->clk); unsigned long hclkn = NSEC_PER_SEC / hclk; - u32 thiz, thold, twait, tset; + u32 thiz, thold, twait, tset, twait_min; if (sdrt->tRC_min < 30000) return -EOPNOTSUPP; @@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, else if (tims->thold > FSMC_THOLD_MASK) tims->thold = FSMC_THOLD_MASK; - twait = max(sdrt->tRP_min, sdrt->tWP_min); - tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; - if (tims->twait == 0) - tims->twait = 1; - else if (tims->twait > FSMC_TWAIT_MASK) - tims->twait = FSMC_TWAIT_MASK; - tset = max(sdrt->tCS_min - sdrt->tWP_min, sdrt->tCEA_max - sdrt->tREA_max); tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1; @@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, else if (tims->tset > FSMC_TSET_MASK) tims->tset = FSMC_TSET_MASK; + /* + * According to SPEAr300 Reference Manual (RM0082) which gives more + * information related to FSMC timings than the SPEAr600 one (RM0305), + * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL + */ + twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000) + + TOUTDEL + TINDEL; + twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min); + + tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; + if (tims->twait == 0) + tims->twait = 1; + else if (tims->twait > FSMC_TWAIT_MASK) + tims->twait = FSMC_TWAIT_MASK; + return 0; } @@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, instr->ctx.waitrdy.timeout_ms); break; } + + if (instr->delay_ns) + ndelay(instr->delay_ns); } return ret; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 3d6c6e880520..a130320de412 100644
--- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_sdr_timings *spec_timings) { const struct nand_controller_ops *ops = chip->controller->ops; - int best_mode = 0, mode, ret; + int best_mode = 0, mode, ret = -EOPNOTSUPP; iface->type = NAND_SDR_IFACE; @@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip, struct nand_nvddr_timings *spec_timings) { const struct nand_controller_ops *ops = chip->controller->ops; - int best_mode = 0, mode, ret; + int best_mode = 0, mode, ret = -EOPNOTSUPP; iface->type = NAND_NVDDR_IFACE; @@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) NAND_OP_CMD(NAND_CMD_ERASE1, 0), NAND_OP_ADDR(2, addrs, 0), NAND_OP_CMD(NAND_CMD_ERASE2, - NAND_COMMON_TIMING_MS(conf, tWB_max)), + NAND_COMMON_TIMING_NS(conf, tWB_max)), NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max), 0), }; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 10506a4b66ef..6cccc3dc00bc 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -567,9 +567,7 @@ config XEN_NETDEV_BACKEND config VMXNET3 tristate "VMware VMXNET3 ethernet driver" depends on PCI && INET - depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \ - IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \ - PPC_64K_PAGES) + depends on PAGE_SIZE_LESS_THAN_64KB help This driver supports VMware's vmxnet3 virtual ethernet NIC. To compile this driver as a module, choose M here: the diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index edffc3489a12..ba587e5fc24f 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -38,6 +38,13 @@ struct bareudp_net { struct list_head bareudp_list; }; +struct bareudp_conf { + __be16 ethertype; + __be16 port; + u16 sport_min; + bool multi_proto_mode; +}; + /* Pseudo network device */ struct bareudp_dev { struct net *net; /* netns for packet i/o */ @@ -602,7 +609,8 @@ static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn, } static int bareudp_configure(struct net *net, struct net_device *dev, - struct bareudp_conf *conf) + struct bareudp_conf *conf, + struct netlink_ext_ack *extack) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); struct bareudp_dev *t, *bareudp = netdev_priv(dev); @@ -611,13 +619,17 @@ static int bareudp_configure(struct net *net, struct net_device *dev, bareudp->net = net; bareudp->dev = dev; t = bareudp_find_dev(bn, conf); - if (t) + if (t) { + NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists"); return -EBUSY; + } if (conf->multi_proto_mode && (conf->ethertype != htons(ETH_P_MPLS_UC) && - conf->ethertype != htons(ETH_P_IP))) + conf->ethertype != htons(ETH_P_IP))) { + NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)"); return -EINVAL; + } bareudp->port = conf->port; bareudp->ethertype = conf->ethertype; @@ -664,7 +676,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev, if (err) return err; - err = bareudp_configure(net, dev, &conf); + err = bareudp_configure(net, dev, &conf, extack); if (err) return err; @@ -721,40 +733,6 @@ static struct rtnl_link_ops bareudp_link_ops __read_mostly = { .fill_info = bareudp_fill_info, }; -struct net_device *bareudp_dev_create(struct net *net, const char *name, - u8 name_assign_type, - struct bareudp_conf *conf) -{ - struct nlattr *tb[IFLA_MAX + 1]; - struct net_device *dev; - int err; - - memset(tb, 0, 
sizeof(tb)); - dev = rtnl_create_link(net, name, name_assign_type, - &bareudp_link_ops, tb, NULL); - if (IS_ERR(dev)) - return dev; - - err = bareudp_configure(net, dev, conf); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } - err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN); - if (err) - goto err; - - err = rtnl_configure_link(dev, NULL); - if (err < 0) - goto err; - - return dev; -err: - bareudp_dellink(dev, NULL); - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(bareudp_dev_create); - static __net_init int bareudp_init_net(struct net *net) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 2ec8e015c7b3..533e476988f2 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work) struct slave *slave; if (!bond_has_slaves(bond)) { - bond_info->tx_rebalance_counter = 0; + atomic_set(&bond_info->tx_rebalance_counter, 0); bond_info->lp_counter = 0; goto re_arm; } rcu_read_lock(); - bond_info->tx_rebalance_counter++; + atomic_inc(&bond_info->tx_rebalance_counter); bond_info->lp_counter++; /* send learning packets */ @@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work) } /* rebalance tx traffic */ - if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) { + if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) { bond_for_each_slave_rcu(bond, slave, iter) { tlb_clear_slave(bond, slave, 1); if (slave == rcu_access_pointer(bond->curr_active_slave)) { @@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work) bond_info->unbalanced_load = 0; } } - bond_info->tx_rebalance_counter = 0; + atomic_set(&bond_info->tx_rebalance_counter, 0); } if (bond_info->rlb_enabled) { @@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) tlb_init_slave(slave); /* order a rebalance ASAP */ - bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; + atomic_set(&bond->alb_info.tx_rebalance_counter, + BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) bond->alb_info.rlb_rebalance = 1; @@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char rlb_clear_slave(bond, slave); } else if (link == BOND_LINK_UP) { /* order a rebalance ASAP */ - bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; + atomic_set(&bond_info->tx_rebalance_counter, + BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) { bond->alb_info.rlb_rebalance = 1; /* If the updelay module parameter is smaller than the diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 0f39ad2af81c..b60e22f6394a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1097,9 +1097,6 @@ static bool bond_should_notify_peers(struct bonding *bond) slave = rcu_dereference(bond->curr_active_slave); rcu_read_unlock(); - netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", - slave ? slave->dev->name : "NULL"); - if (!slave || !bond->send_peer_notif || bond->send_peer_notif % max(1, bond->params.peer_notif_delay) != 0 || @@ -1107,6 +1104,9 @@ static bool bond_should_notify_peers(struct bonding *bond) test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; + netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", + slave ? 
slave->dev->name : "NULL"); + return true; } @@ -4094,6 +4094,7 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm struct mii_ioctl_data *mii = NULL; const struct net_device_ops *ops; struct net_device *real_dev; + struct hwtstamp_config cfg; struct ifreq ifrr; int res = 0; @@ -4124,21 +4125,29 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm break; case SIOCSHWTSTAMP: case SIOCGHWTSTAMP: - rcu_read_lock(); - real_dev = bond_option_active_slave_get_rcu(bond); - rcu_read_unlock(); - if (real_dev) { - strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); - ifrr.ifr_ifru = ifr->ifr_ifru; + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + if (cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX) { + rcu_read_lock(); + real_dev = bond_option_active_slave_get_rcu(bond); + rcu_read_unlock(); + if (real_dev) { + strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); + ifrr.ifr_ifru = ifr->ifr_ifru; - ops = real_dev->netdev_ops; - if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) - res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); + ops = real_dev->netdev_ops; + if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) { + res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); - if (!res) - ifr->ifr_ifru = ifrr.ifr_ifru; + if (!res) + ifr->ifr_ifru = ifrr.ifr_ifru; + + return res; + } + } } - break; + fallthrough; default: res = -EOPNOTSUPP; } diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index 0509625c3082..d5fca3bfaf9a 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -4,6 +4,7 @@ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> */ +#include <linux/units.h> #include <linux/can/dev.h> #ifdef CONFIG_CAN_CALC_BITTIMING @@ -81,9 +82,9 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, if (bt->sample_point) { sample_point_nominal = bt->sample_point; } else { - if (bt->bitrate > 800 * CAN_KBPS) + if (bt->bitrate > 800 * KILO /* BPS */) sample_point_nominal = 750; - else if (bt->bitrate > 500 * CAN_KBPS) + else if (bt->bitrate > 500 * KILO /* BPS */) sample_point_nominal = 800; else sample_point_nominal = 875; diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 74d9899fc904..eb74cdf26b88 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_SPACK_EWLR BIT(23) #define KVASER_PCIEFD_SPACK_EPLR BIT(24) +/* Kvaser KCAN_EPACK second word */ +#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) + struct kvaser_pciefd; struct kvaser_pciefd_can { @@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, can->err_rep_cnt++; can->can.can_stats.bus_error++; - stats->rx_errors++; + if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) + stats->tx_errors++; + else + stats->rx_errors++; can->bec.txerr = bec.txerr; can->bec.rxerr = bec.rxerr; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2470c47b2e31..c2a8421e7845 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -204,16 +204,16 @@ enum m_can_reg { /* Interrupts for version 3.0.x */ #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) -#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | 
IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) /* Interrupts for version >= 3.1.x */ #define IR_ERR_LEC_31X (IR_PED | IR_PEA) -#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) /* Interrupt Line Select (ILS) */ @@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs) err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA, cf->data, DIV_ROUND_UP(cf->len, 4)); if (err) - goto out_fail; + goto out_free_skb; } /* acknowledge rx fifo 0 */ @@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs) return 0; +out_free_skb: + kfree_skb(skb); out_fail: netdev_err(dev, "FIFO read returned %d\n", err); return err; @@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_ELO) - netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); if (irqstatus & IR_BEC) @@ -1494,20 +1494,32 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = &m_can_bittiming_const_30X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_30X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; break; case 32: case 33: /* Support both MCAN version v3.2.x and v3.3.0 */ - cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; cdev->can.ctrlmode_supported |= (m_can_niso_supported(cdev) ? 
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index d18b515e6ccc..2c5d40997168 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -85,6 +85,9 @@ struct m_can_classdev { struct sk_buff *tx_skb; struct phy *transceiver; + const struct can_bittiming_const *bit_timing; + const struct can_bittiming_const *data_timing; + struct m_can_ops *ops; int version; diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c index 89cc3d41e952..b56a54d6c5a9 100644 --- a/drivers/net/can/m_can/m_can_pci.c +++ b/drivers/net/can/m_can/m_can_pci.c @@ -18,9 +18,14 @@ #define M_CAN_PCI_MMIO_BAR 0 -#define M_CAN_CLOCK_FREQ_EHL 100000000 #define CTL_CSR_INT_CTL_OFFSET 0x508 +struct m_can_pci_config { + const struct can_bittiming_const *bit_timing; + const struct can_bittiming_const *data_timing; + unsigned int clock_freq; +}; + struct m_can_pci_priv { struct m_can_classdev cdev; @@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *src = priv->base + offset; - ioread32_rep(priv->base + offset, val, val_count); + while (val_count--) { + *(unsigned int *)val = ioread32(src); + val += 4; + src += 4; + } return 0; } @@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, const void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *dst = priv->base + offset; - iowrite32_rep(priv->base + offset, val, val_count); + while (val_count--) { + iowrite32(*(unsigned int *)val, dst); + val += 4; + dst += 4; + } return 0; } @@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = { .read_fifo = iomap_read_fifo, }; +static const struct can_bittiming_const m_can_bittiming_const_ehl = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +static const struct can_bittiming_const m_can_data_bittiming_const_ehl = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static const struct m_can_pci_config m_can_pci_ehl = { + .bit_timing = &m_can_bittiming_const_ehl, + .data_timing = &m_can_data_bittiming_const_ehl, + .clock_freq = 200000000, +}; + static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { struct device *dev = &pci->dev; + const struct m_can_pci_config *cfg; struct m_can_classdev *mcan_class; struct m_can_pci_priv *priv; void __iomem *base; @@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (!mcan_class) return -ENOMEM; + cfg = (const struct m_can_pci_config *)id->driver_data; + priv = cdev_to_priv(mcan_class); priv->base = base; @@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) mcan_class->dev = &pci->dev; mcan_class->net->irq = pci_irq_vector(pci, 0); mcan_class->pm_clock_support = 1; - mcan_class->can.clock.freq = id->driver_data; + mcan_class->bit_timing = cfg->bit_timing; + mcan_class->data_timing = cfg->data_timing; + mcan_class->can.clock.freq = 
cfg->clock_freq; mcan_class->ops = &m_can_pci_ops; pci_set_drvdata(pci, mcan_class); @@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops, m_can_pci_suspend, m_can_pci_resume); static const struct pci_device_id m_can_pci_id_table[] = { - { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, }, - { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, }, + { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, }, + { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(pci, m_can_pci_id_table); diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 92a54a5fd4c5..964c8a09226a 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) cf->data[i + 1] = data_reg >> 8; } - netif_receive_skb(skb); rcv_pkts++; stats->rx_packets++; quota--; stats->rx_bytes += cf->len; + netif_receive_skb(skb); pch_fifo_thresh(priv, obj_num); obj_num++; diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index e21b169c14c0..4642b6d4aaf7 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) free_sja1000dev(dev); } - err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, + if (!card->channels) { + err = -ENODEV; + goto failure_cleanup; + } + + err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 89d9c986a229..a17641d36468 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -25,11 +25,11 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> @@ -828,19 +828,25 @@ MODULE_DEVICE_TABLE(spi, hi3110_id_table); static int hi3110_can_probe(struct spi_device *spi) { - const struct of_device_id *of_id = of_match_device(hi3110_of_match, - &spi->dev); + struct device *dev = &spi->dev; struct net_device *net; struct hi3110_priv *priv; + const void *match; struct clk *clk; - int freq, ret; + u32 freq; + int ret; + + clk = devm_clk_get_optional(&spi->dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n"); - clk = devm_clk_get(&spi->dev, NULL); - if (IS_ERR(clk)) { - dev_err(&spi->dev, "no CAN clock source defined\n"); - return PTR_ERR(clk); + if (clk) { + freq = clk_get_rate(clk); + } else { + ret = device_property_read_u32(dev, "clock-frequency", &freq); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n"); } - freq = clk_get_rate(clk); /* Sanity check */ if (freq > 40000000) @@ -851,11 +857,9 @@ static int hi3110_can_probe(struct spi_device *spi) if (!net) return -ENOMEM; - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_free; - } + ret = clk_prepare_enable(clk); + if (ret) + goto out_free; net->netdev_ops = &hi3110_netdev_ops; net->flags |= IFF_ECHO; @@ -870,8 +874,9 @@ static int hi3110_can_probe(struct spi_device *spi) CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; - 
if (of_id) - priv->model = (enum hi3110_model)(uintptr_t)of_id->data; + match = device_get_match_data(dev); + if (match) + priv->model = (enum hi3110_model)(uintptr_t)match; else priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; @@ -918,9 +923,7 @@ static int hi3110_can_probe(struct spi_device *spi) ret = hi3110_hw_probe(spi); if (ret) { - if (ret == -ENODEV) - dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n", - priv->model); + dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model); goto error_probe; } hi3110_hw_sleep(spi); @@ -938,14 +941,12 @@ static int hi3110_can_probe(struct spi_device *spi) hi3110_power_enable(priv->power, 0); out_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); out_free: free_candev(net); - dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); - return ret; + return dev_err_probe(dev, ret, "Probe failed\n"); } static int hi3110_can_remove(struct spi_device *spi) @@ -957,8 +958,7 @@ static int hi3110_can_remove(struct spi_device *spi) hi3110_power_enable(priv->power, 0); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); free_candev(net); diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 54aa7c25c4de..37862e7f6840 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -61,6 +61,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/reset.h> #define DRV_NAME "sun4i_can" @@ -200,10 +201,20 @@ #define SUN4I_CAN_MAX_IRQ 20 #define SUN4I_MODE_MAX_RETRIES 100 +/** + * struct sun4ican_quirks - Differences between SoC variants. + * + * @has_reset: SoC needs reset deasserted. + */ +struct sun4ican_quirks { + bool has_reset; +}; + struct sun4ican_priv { struct can_priv can; void __iomem *base; struct clk *clk; + struct reset_control *reset; spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */ }; @@ -702,6 +713,13 @@ static int sun4ican_open(struct net_device *dev) goto exit_irq; } + /* software reset deassert */ + err = reset_control_deassert(priv->reset); + if (err) { + netdev_err(dev, "could not deassert CAN reset\n"); + goto exit_soft_reset; + } + /* turn on clocking for CAN peripheral block */ err = clk_prepare_enable(priv->clk); if (err) { @@ -723,6 +741,8 @@ static int sun4ican_open(struct net_device *dev) exit_can_start: clk_disable_unprepare(priv->clk); exit_clock: + reset_control_assert(priv->reset); +exit_soft_reset: free_irq(dev->irq, dev); exit_irq: close_candev(dev); @@ -736,6 +756,7 @@ static int sun4ican_close(struct net_device *dev) netif_stop_queue(dev); sun4i_can_stop(dev); clk_disable_unprepare(priv->clk); + reset_control_assert(priv->reset); free_irq(dev->irq, dev); close_candev(dev); @@ -750,9 +771,27 @@ static const struct net_device_ops sun4ican_netdev_ops = { .ndo_start_xmit = sun4ican_start_xmit, }; +static const struct sun4ican_quirks sun4ican_quirks_a10 = { + .has_reset = false, +}; + +static const struct sun4ican_quirks sun4ican_quirks_r40 = { + .has_reset = true, +}; + static const struct of_device_id sun4ican_of_match[] = { - {.compatible = "allwinner,sun4i-a10-can"}, - {}, + { + .compatible = "allwinner,sun4i-a10-can", + .data = &sun4ican_quirks_a10 + }, { + .compatible = "allwinner,sun7i-a20-can", + .data = &sun4ican_quirks_a10 + }, { + .compatible = "allwinner,sun8i-r40-can", + .data = &sun4ican_quirks_r40 + }, { + /* sentinel */ + }, }; MODULE_DEVICE_TABLE(of, sun4ican_of_match); @@ -771,10 
+810,28 @@ static int sun4ican_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct clk *clk; + struct reset_control *reset = NULL; void __iomem *addr; int err, irq; struct net_device *dev; struct sun4ican_priv *priv; + const struct sun4ican_quirks *quirks; + + quirks = of_device_get_match_data(&pdev->dev); + if (!quirks) { + dev_err(&pdev->dev, "failed to determine the quirks to use\n"); + err = -ENODEV; + goto exit; + } + + if (quirks->has_reset) { + reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(reset)) { + dev_err(&pdev->dev, "unable to request reset\n"); + err = PTR_ERR(reset); + goto exit; + } + } clk = of_clk_get(np, 0); if (IS_ERR(clk)) { @@ -818,6 +875,7 @@ static int sun4ican_probe(struct platform_device *pdev) CAN_CTRLMODE_3_SAMPLES; priv->base = addr; priv->clk = clk; + priv->reset = reset; spin_lock_init(&priv->cmdreg_lock); platform_set_drvdata(pdev, dev); diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c index 14e360c9f2c9..1bcdcece5ec7 100644 --- a/drivers/net/can/usb/etas_es58x/es581_4.c +++ b/drivers/net/can/usb/etas_es58x/es581_4.c @@ -10,6 +10,7 @@ */ #include <linux/kernel.h> +#include <linux/units.h> #include <asm/unaligned.h> #include "es58x_core.h" @@ -469,8 +470,8 @@ const struct es58x_parameters es581_4_param = { .bittiming_const = &es581_4_bittiming_const, .data_bittiming_const = NULL, .tdc_const = NULL, - .bitrate_max = 1 * CAN_MBPS, - .clock = {.freq = 50 * CAN_MHZ}, + .bitrate_max = 1 * MEGA /* BPS */, + .clock = {.freq = 50 * MEGA /* Hz */}, .ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC, .tx_start_of_frame = 0xAFAF, .rx_start_of_frame = 0xFAFA, diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index 4f0cae29f4d8..ec87126e1a7d 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -12,6 +12,7 @@ */ #include <linux/kernel.h> +#include <linux/units.h> #include <asm/unaligned.h> #include "es58x_core.h" @@ -522,8 +523,8 @@ const struct es58x_parameters es58x_fd_param = { * Mbps work in an optimal environment but are not recommended * for production environment. 
*/ - .bitrate_max = 8 * CAN_MBPS, - .clock = {.freq = 80 * CAN_MHZ}, + .bitrate_max = 8 * MEGA /* BPS */, + .clock = {.freq = 80 * MEGA /* Hz */}, .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 59ba7c7beec0..f7af1bf5ab46 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -28,10 +28,6 @@ #include "kvaser_usb.h" -/* Forward declaration */ -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; - -#define CAN_USB_CLOCK 8000000 #define MAX_USBCAN_NET_DEVICES 2 /* Command header size */ @@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; #define CMD_LEAF_LOG_MESSAGE 106 +/* Leaf frequency options */ +#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 +#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 +#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) +#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) + /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -340,6 +342,50 @@ struct kvaser_usb_err_summary { }; }; +static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { + .name = "kvaser_usb", + .tseg1_min = KVASER_USB_TSEG1_MIN, + .tseg1_max = KVASER_USB_TSEG1_MAX, + .tseg2_min = KVASER_USB_TSEG2_MIN, + .tseg2_max = KVASER_USB_TSEG2_MAX, + .sjw_max = KVASER_USB_SJW_MAX, + .brp_min = KVASER_USB_BRP_MIN, + .brp_max = KVASER_USB_BRP_MAX, + .brp_inc = KVASER_USB_BRP_INC, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = { + .clock = { + .freq = 8000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = { + .clock = { + .freq = 16000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = { + .clock = { + .freq = 24000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = { + .clock = { + .freq = 32000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *frame_len, @@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, return rc; } +static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, + const struct leaf_cmd_softinfo *softinfo) +{ + u32 sw_options = le32_to_cpu(softinfo->sw_options); + + dev->fw_version = le32_to_cpu(softinfo->fw_version); + dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); + + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz; + break; + } +} + static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) { struct kvaser_cmd cmd; @@ -486,14 +553,13 @@ static int 
kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) switch (dev->card_data.leaf.family) { case KVASER_LEAF: - dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); + kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; case KVASER_USBCAN: dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); + dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz; break; } @@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; - dev->cfg = &kvaser_usb_leaf_dev_cfg; card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; return 0; } -static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, -}; - static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); @@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, }; - -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = { - .clock = { - .freq = CAN_USB_CLOCK, - }, - .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, -}; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index af4761968733..3867f3d4545f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1860,7 +1860,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_mdb_del); -int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) +int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct b53_device *dev = ds->priv; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; @@ -1887,7 +1888,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); b53_for_each_port(dev, i) { - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Add this local port to the remote port VLAN control @@ -1911,7 +1912,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) } EXPORT_SYMBOL(b53_br_join); -void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) +void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { struct b53_device *dev = ds->priv; struct b53_vlan *vl = &dev->vlans[0]; @@ -1923,7 +1924,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_for_each_port(dev, i) { /* Don't touch the remaining ports */ - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 579da74ada64..b41dc8ac2ca8 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -324,8 +324,9 @@ void 
b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_get_sset_count(struct dsa_switch *ds, int port, int sset); void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data); -int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); -void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); +int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, + bool *tx_fwd_offload); +void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); int b53_br_flags_pre(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 01e37b75471e..2b88f03e5252 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = { }; MODULE_DEVICE_TABLE(of, b53_spi_of_match); +static const struct spi_device_id b53_spi_ids[] = { + { .name = "bcm5325" }, + { .name = "bcm5365" }, + { .name = "bcm5395" }, + { .name = "bcm5397" }, + { .name = "bcm5398" }, + { .name = "bcm53115" }, + { .name = "bcm53125" }, + { .name = "bcm53128" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, b53_spi_ids); + static struct spi_driver b53_spi_driver = { .driver = { .name = "b53-switch", @@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = { .probe = b53_spi_probe, .remove = b53_spi_remove, .shutdown = b53_spi_shutdown, + .id_table = b53_spi_ids, }; module_spi_driver(b53_spi_driver); diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index e638e3eea911..33daaf10c488 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -167,19 +167,20 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port, } static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", - __func__, port, bridge->name); + __func__, port, bridge.dev->name); return 0; } static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", - __func__, port, bridge->name); + __func__, port, bridge.dev->name); } static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 4e0b53d94b52..726f267cb228 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -674,7 +674,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port, } static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct hellcreek *hellcreek = ds->priv; @@ -691,7 +692,7 @@ static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, } static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct hellcreek *hellcreek = ds->priv; @@ -710,8 +711,9 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek, u16 meta = 0; dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, " - "OBT=%d, REPRIO_EN=%d, 
PRIO=%d\n", entry->mac, entry->portmask, - entry->is_obt, entry->reprio_en, entry->reprio_tc); + "OBT=%d, PASS_BLOCKED=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, + entry->portmask, entry->is_obt, entry->pass_blocked, + entry->reprio_en, entry->reprio_tc); /* Add mac address */ hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH); @@ -722,6 +724,8 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek, meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT; if (entry->is_obt) meta |= HR_FDBWRM0_OBT; + if (entry->pass_blocked) + meta |= HR_FDBWRM0_PASS_BLOCKED; if (entry->reprio_en) { meta |= HR_FDBWRM0_REPRIO_EN; meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT; @@ -1049,7 +1053,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek) static int hellcreek_setup_fdb(struct hellcreek *hellcreek) { - static struct hellcreek_fdb_entry ptp = { + static struct hellcreek_fdb_entry l2_ptp = { /* MAC: 01-1B-19-00-00-00 */ .mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 }, .portmask = 0x03, /* Management ports */ @@ -1060,24 +1064,94 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek) .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */ .reprio_en = 1, }; - static struct hellcreek_fdb_entry p2p = { + static struct hellcreek_fdb_entry udp4_ptp = { + /* MAC: 01-00-5E-00-01-81 */ + .mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 0, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry udp6_ptp = { + /* MAC: 33-33-00-00-01-81 */ + .mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 0, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry l2_p2p = { /* MAC: 01-80-C2-00-00-0E */ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }, .portmask = 0x03, /* Management ports */ .age = 0, .is_obt = 0, - .pass_blocked = 0, + .pass_blocked = 1, .is_static = 1, .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */ .reprio_en = 1, }; + static struct hellcreek_fdb_entry udp4_p2p = { + /* MAC: 01-00-5E-00-00-6B */ + .mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 1, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry udp6_p2p = { + /* MAC: 33-33-00-00-00-6B */ + .mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 1, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry stp = { + /* MAC: 01-80-C2-00-00-00 */ + .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 1, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; int ret; mutex_lock(&hellcreek->reg_lock); - ret = __hellcreek_fdb_add(hellcreek, &ptp); + ret = __hellcreek_fdb_add(hellcreek, &l2_ptp); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &udp4_ptp); if (ret) goto out; - ret = __hellcreek_fdb_add(hellcreek, &p2p); + ret = __hellcreek_fdb_add(hellcreek, &udp6_ptp); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &l2_p2p); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &udp4_p2p); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &udp6_p2p); + if (ret) + goto out; + ret = 
__hellcreek_fdb_add(hellcreek, &stp); out: mutex_unlock(&hellcreek->reg_lock); @@ -1384,14 +1458,19 @@ static void hellcreek_teardown(struct dsa_switch *ds) dsa_devlink_resources_unregister(ds); } -static void hellcreek_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct hellcreek *hellcreek = ds->priv; - dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port); + __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces); + + /* Include GMII - the hardware does not support this interface + * mode, but it's the default interface mode for phylib, so we + * need it for compatibility with existing DT. + */ + __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); /* The MAC settings are a hardware configuration option and cannot be * changed at run time or by strapping. Therefore the attached PHYs @@ -1399,12 +1478,9 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port, * by the hardware. */ if (hellcreek->pdata->is_100_mbits) - phylink_set(mask, 100baseT_Full); + config->mac_capabilities = MAC_100FD; else - phylink_set(mask, 1000baseT_Full); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); + config->mac_capabilities = MAC_1000FD; } static int @@ -1755,7 +1831,7 @@ static const struct dsa_switch_ops hellcreek_ds_ops = { .get_strings = hellcreek_get_strings, .get_tag_protocol = hellcreek_get_tag_protocol, .get_ts_info = hellcreek_get_ts_info, - .phylink_validate = hellcreek_phylink_validate, + .phylink_get_caps = hellcreek_phylink_get_caps, .port_bridge_flags = hellcreek_bridge_flags, .port_bridge_join = hellcreek_port_bridge_join, .port_bridge_leave = hellcreek_port_bridge_leave, diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c index 40b41c794dfa..b3bc948d6145 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c @@ -52,10 +52,6 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port, */ clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state); - /* Reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_ON: tx_tstamp_enable = true; diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 89f920289ae2..d55784d19fa4 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1103,12 +1103,13 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) } static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct lan9303 *chip = ds->priv; dev_dbg(chip->dev, "%s(port %d)\n", __func__, port); - if (dsa_to_port(ds, 1)->bridge_dev == dsa_to_port(ds, 2)->bridge_dev) { + if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) { lan9303_bridge_ports(chip); chip->is_bridged = true; /* unleash stp_state_set() */ } @@ -1117,7 +1118,7 @@ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, } static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct lan9303 
*chip = ds->priv; diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 7056d98d8177..46ed953e787e 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -759,7 +759,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, struct netlink_ext_ack *extack) { - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; /* Do not allow changing the VLAN filtering options while in bridge */ @@ -1146,16 +1146,18 @@ static int gswip_vlan_remove(struct gswip_priv *priv, } static int gswip_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { + struct net_device *br = bridge.dev; struct gswip_priv *priv = ds->priv; int err; /* When the bridge uses VLAN filtering we have to configure VLAN * specific bridges. No bridge is configured here. */ - if (!br_vlan_enabled(bridge)) { - err = gswip_vlan_add_unaware(priv, bridge, port); + if (!br_vlan_enabled(br)) { + err = gswip_vlan_add_unaware(priv, br, port); if (err) return err; priv->port_vlan_filter &= ~BIT(port); @@ -1166,8 +1168,9 @@ static int gswip_port_bridge_join(struct dsa_switch *ds, int port, } static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { + struct net_device *br = bridge.dev; struct gswip_priv *priv = ds->priv; gswip_add_single_port_br(priv, port, true); @@ -1175,16 +1178,16 @@ static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, /* When the bridge uses VLAN filtering we have to configure VLAN * specific bridges. No bridge is configured here. 
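For orientation: every bridge_join/bridge_leave conversion in this series follows the same shape. The driver now receives a struct dsa_bridge by value instead of a struct net_device *. Below is a minimal sketch of the new signature, using only the fields visible in these hunks (bridge.dev, bridge.num, and the tx_fwd_offload out-parameter); the foo_ driver name is hypothetical.

static int foo_port_bridge_join(struct dsa_switch *ds, int port,
                                struct dsa_bridge bridge,
                                bool *tx_fwd_offload)
{
        struct net_device *br = bridge.dev;     /* the bridge's net_device, as before */

        /* A driver that can forward in hardware on behalf of this
         * bridge opts in here; leaving it false keeps the default
         * software-forwarding behavior.
         */
        *tx_fwd_offload = false;

        dev_dbg(ds->dev, "port %d joined bridge %s (number %u)\n",
                port, br->name, bridge.num);
        return 0;
}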
*/ - if (!br_vlan_enabled(bridge)) - gswip_vlan_remove(priv, bridge, port, 0, true, false); + if (!br_vlan_enabled(br)) + gswip_vlan_remove(priv, br, port, 0, true, false); } static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; unsigned int max_ports = priv->hw_info->max_ports; int pos = max_ports; int i, idx = -1; @@ -1229,8 +1232,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; int err; @@ -1254,8 +1257,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port, static int gswip_port_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; /* We have to receive all packets on the CPU port and should not @@ -1340,8 +1343,8 @@ static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) static int gswip_port_fdb(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, bool add) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; struct gswip_pce_table_entry mac_bridge = {0,}; unsigned int cpu_port = priv->hw_info->cpu_port; int fid = -1; @@ -1438,114 +1441,70 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, return 0; } -static void gswip_phylink_set_capab(unsigned long *supported, - struct phylink_link_state *state) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - /* Allow all the expected bits */ - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - - /* With the exclusion of MII, Reverse MII and Reduced MII, we - * support Gigabit, including Half duplex - */ - if (state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_REVMII && - state->interface != PHY_INTERFACE_MODE_RMII) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseT_Half); - } - - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -} - -static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { switch (port) { case 0: case 1: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_REVMII && - state->interface != PHY_INTERFACE_MODE_RMII) - goto 
unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_REVMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); break; + case 2: case 3: case 4: - if (state->interface != PHY_INTERFACE_MODE_INTERNAL) - goto unsupported; + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); break; + case 5: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_INTERNAL) - goto unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); break; - default: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; } - gswip_phylink_set_capab(supported, state); - - return; - -unsupported: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", - phy_modes(state->interface), port); + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; } -static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { switch (port) { case 0: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_GMII && - state->interface != PHY_INTERFACE_MODE_RMII) - goto unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); break; + case 1: case 2: case 3: case 4: - if (state->interface != PHY_INTERFACE_MODE_INTERNAL) - goto unsupported; + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); break; + case 5: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_INTERNAL && - state->interface != PHY_INTERFACE_MODE_RMII) - goto unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); break; - default: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; } - gswip_phylink_set_capab(supported, state); - - return; - -unsupported: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", - phy_modes(state->interface), port); + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; } static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) @@ -1827,7 +1786,7 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, - .phylink_validate = gswip_xrx200_phylink_validate, + .phylink_get_caps = gswip_xrx200_phylink_get_caps, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, .phylink_mac_link_up = gswip_phylink_mac_link_up, @@ -1851,7 +1810,7 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, - .phylink_validate = 
gswip_xrx300_phylink_validate, + .phylink_get_caps = gswip_xrx300_phylink_get_caps, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, .phylink_mac_link_up = gswip_phylink_mac_link_up, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8a04302018dc..47a856533cff 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -43,7 +43,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port) continue; if (port == i) continue; - if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev) + if (!dsa_port_bridge_same(dp, other_dp)) continue; if (other_p->stp_state == BR_STATE_FORWARDING && @@ -192,7 +192,8 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf) EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats); int ksz_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { /* port_stp_state_set() will be called after to put the port in * appropriate state so there is no need to do anything. @@ -203,7 +204,7 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port, EXPORT_SYMBOL_GPL(ksz_port_bridge_join); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { /* port_stp_state_set() will be called after to put the port in * forwarding state so there is no need to do anything. diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 54b456bc8972..df8ae59c8525 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -155,9 +155,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, int ksz_sset_count(struct dsa_switch *ds, int port, int sset); void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf); int ksz_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br); + struct dsa_bridge bridge, bool *tx_fwd_offload); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br); + struct dsa_bridge bridge); void ksz_port_fast_age(struct dsa_switch *ds, int port); int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 9890672a206d..b82512e5b33b 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1186,29 +1186,33 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port, static int mt7530_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, bool *tx_fwd_offload) { - struct mt7530_priv *priv = ds->priv; + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; u32 port_bitmap = BIT(MT7530_CPU_PORT); - int i; + struct mt7530_priv *priv = ds->priv; mutex_lock(&priv->reg_mutex); - for (i = 0; i < MT7530_NUM_PORTS; i++) { + dsa_switch_for_each_user_port(other_dp, ds) { + int other_port = other_dp->index; + + if (dp == other_dp) + continue; + /* Add this port to the port matrix of the other ports in the * same bridge. If the port is disabled, port matrix is kept * and not being setup until the port becomes enabled. 
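The mt7530 loop being rewritten here shows the recurring iteration idiom of these conversions: walk user ports with dsa_switch_for_each_user_port() and test membership with dsa_port_offloads_bridge(), rather than indexing 0..N and comparing dp->bridge_dev pointers by hand. A condensed sketch, with a hypothetical foo_ name:

static void foo_visit_bridge_members(struct dsa_switch *ds, int port,
                                     struct dsa_bridge *bridge)
{
        struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;

        dsa_switch_for_each_user_port(other_dp, ds) {
                if (dp == other_dp)
                        continue;       /* skip the port being configured */
                if (!dsa_port_offloads_bridge(other_dp, bridge))
                        continue;       /* different bridge, or none */

                /* other_dp->index is a fellow member of the same bridge */
        }
}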
*/ - if (dsa_is_user_port(ds, i) && i != port) { - if (dsa_to_port(ds, i)->bridge_dev != bridge) - continue; - if (priv->ports[i].enable) - mt7530_set(priv, MT7530_PCR_P(i), - PCR_MATRIX(BIT(port))); - priv->ports[i].pm |= PCR_MATRIX(BIT(port)); + if (!dsa_port_offloads_bridge(other_dp, &bridge)) + continue; - port_bitmap |= BIT(i); - } + if (priv->ports[other_port].enable) + mt7530_set(priv, MT7530_PCR_P(other_port), + PCR_MATRIX(BIT(port))); + priv->ports[other_port].pm |= PCR_MATRIX(BIT(port)); + + port_bitmap |= BIT(other_port); } /* Add all the other ports to this port matrix. */ @@ -1236,7 +1240,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) /* This is called after .port_bridge_leave when leaving a VLAN-aware * bridge. Don't set standalone ports to fallback mode. */ - if (dsa_to_port(ds, port)->bridge_dev) + if (dsa_port_bridge_dev_get(dsa_to_port(ds, port))) mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, MT7530_PORT_FALLBACK_MODE); @@ -1299,26 +1303,30 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) static void mt7530_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; struct mt7530_priv *priv = ds->priv; - int i; mutex_lock(&priv->reg_mutex); - for (i = 0; i < MT7530_NUM_PORTS; i++) { + dsa_switch_for_each_user_port(other_dp, ds) { + int other_port = other_dp->index; + + if (dp == other_dp) + continue; + /* Remove this port from the port matrix of the other ports * in the same bridge. If the port is disabled, port matrix * is kept and not being setup until the port becomes enabled. */ - if (dsa_is_user_port(ds, i) && i != port) { - if (dsa_to_port(ds, i)->bridge_dev != bridge) - continue; - if (priv->ports[i].enable) - mt7530_clear(priv, MT7530_PCR_P(i), - PCR_MATRIX(BIT(port))); - priv->ports[i].pm &= ~PCR_MATRIX(BIT(port)); - } + if (!dsa_port_offloads_bridge(other_dp, &bridge)) + continue; + + if (priv->ports[other_port].enable) + mt7530_clear(priv, MT7530_PCR_P(other_port), + PCR_MATRIX(BIT(port))); + priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port)); } /* Set the cpu port to be the only one in the port matrix of diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f00cbf5753b9..dced1505e6bf 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port) u16 reg; int err; + /* The 88e6250 family does not have the PHY detect bit. Instead, + * report whether the port is internal. + */ + if (chip->info->family == MV88E6XXX_FAMILY_6250) + return port < chip->info->num_internal_phys; + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); if (err) { dev_err(chip->dev, @@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, { struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_port *p; - int err; + int err = 0; p = &chip->ports[port]; - /* FIXME: is this the correct test? If we're in fixed mode on an - * internal port, why should we process this any different from - * PHY mode? On the other hand, the port may be automedia between - * an internal PHY and the serdes... - */ - if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port)) - return; - mv88e6xxx_reg_lock(chip); - /* In inband mode, the link may come up at any time while the link 
Force the link down while we reconfigure the - * interface mode. - */ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) - chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); - - err = mv88e6xxx_port_config_interface(chip, port, state->interface); - if (err && err != -EOPNOTSUPP) - goto err_unlock; - err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface, - state->advertising); - /* FIXME: we should restart negotiation if something changed - which - * is something we get if we convert to using phylinks PCS operations. - */ - if (err > 0) - err = 0; + if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) { + /* In inband mode, the link may come up at any time while the + * link is not forced down. Force the link down while we + * reconfigure the interface mode. + */ + if (mode == MLO_AN_INBAND && + p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, + LINK_FORCED_DOWN); + + err = mv88e6xxx_port_config_interface(chip, port, + state->interface); + if (err && err != -EOPNOTSUPP) + goto err_unlock; + + err = mv88e6xxx_serdes_pcs_config(chip, port, mode, + state->interface, + state->advertising); + /* FIXME: we should restart negotiation if something changed - + * which is something we get if we convert to using phylinks + * PCS operations. + */ + if (err > 0) + err = 0; + } /* Undo the forced down state above after completing configuration - * irrespective of its state on entry, which allows the link to come up. + * irrespective of its state on entry, which allows the link to come + * up in the in-band case where there is no separate SERDES. Also + * ensure that the link can come up if the PPU is in use and we are + * in PHY mode (we treat the PPU as an effective in-band mechanism.) */ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) + if (chip->info->ops->port_set_link && + ((mode == MLO_AN_INBAND && p->interface != state->interface) || + (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port)))) chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); p->interface = state->interface; @@ -752,11 +762,10 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, ops = chip->info->ops; mv88e6xxx_reg_lock(chip); - /* Internal PHYs propagate their configuration directly to the MAC. - * External PHYs depend on whether the PPU is enabled for this port. + /* Force the link down if we know the port may not be automatically + * updated by the switch or if we are using fixed-link mode. */ - if (((!mv88e6xxx_phy_is_internal(ds, port) && - !mv88e6xxx_port_ppu_updates(chip, port)) || + if ((!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) && ops->port_sync_link) err = ops->port_sync_link(chip, port, mode, false); mv88e6xxx_reg_unlock(chip); @@ -779,11 +788,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, ops = chip->info->ops; mv88e6xxx_reg_lock(chip); - /* Internal PHYs propagate their configuration directly to the MAC. - * External PHYs depend on whether the PPU is enabled for this port. + /* Configure and force the link up if we know that the port may not + * automatically updated by the switch or if we are using fixed-link + * mode. 
*/ - if ((!mv88e6xxx_phy_is_internal(ds, port) && - !mv88e6xxx_port_ppu_updates(chip, port)) || + if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) { /* FIXME: for an automedia port, should we force the link * down here - what if the link comes up due to "other" media @@ -1228,8 +1237,7 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) { struct dsa_switch *ds = chip->ds; struct dsa_switch_tree *dst = ds->dst; - struct net_device *br; - struct dsa_port *dp; + struct dsa_port *dp, *other_dp; bool found = false; u16 pvlan; @@ -1238,11 +1246,9 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) list_for_each_entry(dp, &dst->ports, list) { if (dp->ds->index == dev && dp->index == port) { /* dp might be a DSA link or a user port, so it - * might or might not have a bridge_dev - * pointer. Use the "found" variable for both - * cases. + * might or might not have a bridge. + * Use the "found" variable for both cases. */ - br = dp->bridge_dev; found = true; break; } @@ -1250,13 +1256,14 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) /* dev is a virtual bridge */ } else { list_for_each_entry(dp, &dst->ports, list) { - if (dp->bridge_num < 0) + unsigned int bridge_num = dsa_port_bridge_num_get(dp); + + if (!bridge_num) continue; - if (dp->bridge_num + 1 + dst->last_switch != dev) + if (bridge_num + dst->last_switch != dev) continue; - br = dp->bridge_dev; found = true; break; } @@ -1275,12 +1282,11 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) /* Frames from user ports can egress any local DSA links and CPU ports, * as well as any local member of their bridge group. */ - list_for_each_entry(dp, &dst->ports, list) - if (dp->ds == ds && - (dp->type == DSA_PORT_TYPE_CPU || - dp->type == DSA_PORT_TYPE_DSA || - (br && dp->bridge_dev == br))) - pvlan |= BIT(dp->index); + dsa_switch_for_each_port(other_dp, ds) + if (other_dp->type == DSA_PORT_TYPE_CPU || + other_dp->type == DSA_PORT_TYPE_DSA || + dsa_port_bridge_same(dp, other_dp)) + pvlan |= BIT(other_dp->index); return pvlan; } @@ -1647,12 +1653,13 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid) { + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_vtu_entry vlan; - int i, err; + int err; /* DSA and CPU ports have to be members of multiple vlans */ - if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) return 0; err = mv88e6xxx_vtu_get(chip, vid, &vlan); @@ -1662,27 +1669,22 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (!vlan.valid) return 0; - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) - continue; - - if (!dsa_to_port(ds, i)->slave) - continue; + dsa_switch_for_each_user_port(other_dp, ds) { + struct net_device *other_br; - if (vlan.member[i] == + if (vlan.member[other_dp->index] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; - if (dsa_to_port(ds, i)->bridge_dev == - dsa_to_port(ds, port)->bridge_dev) + if (dsa_port_bridge_same(dp, other_dp)) break; /* same bridge, check next VLAN */ - if (!dsa_to_port(ds, i)->bridge_dev) + other_br = dsa_port_bridge_dev_get(other_dp); + if (!other_br) continue; dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n", - port, vlan.vid, 
i, - netdev_name(dsa_to_port(ds, i)->bridge_dev)); + port, vlan.vid, other_dp->index, netdev_name(other_br)); return -EOPNOTSUPP; } @@ -1692,13 +1694,14 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port) { struct dsa_port *dp = dsa_to_port(chip->ds, port); + struct net_device *br = dsa_port_bridge_dev_get(dp); struct mv88e6xxx_port *p = &chip->ports[port]; u16 pvid = MV88E6XXX_VID_STANDALONE; bool drop_untagged = false; int err; - if (dp->bridge_dev) { - if (br_vlan_enabled(dp->bridge_dev)) { + if (br) { + if (br_vlan_enabled(br)) { pvid = p->bridge_pvid.vid; drop_untagged = !p->bridge_pvid.valid; } else { @@ -2416,7 +2419,7 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, } static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, - struct net_device *br) + struct dsa_bridge bridge) { struct dsa_switch *ds = chip->ds; struct dsa_switch_tree *dst = ds->dst; @@ -2424,7 +2427,7 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, int err; list_for_each_entry(dp, &dst->ports, list) { - if (dp->bridge_dev == br) { + if (dsa_port_offloads_bridge(dp, &bridge)) { if (dp->ds == ds) { /* This is a local bridge group member, * remap its Port VLAN Map. @@ -2447,15 +2450,29 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, return 0; } +/* Treat the software bridge as a virtual single-port switch behind the + * CPU and map in the PVT. First dst->last_switch elements are taken by + * physical switches, so start from beyond that range. + */ +static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds, + unsigned int bridge_num) +{ + u8 dev = bridge_num + ds->dst->last_switch; + struct mv88e6xxx_chip *chip = ds->priv; + + return mv88e6xxx_pvt_map(chip, dev, 0); +} + static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct mv88e6xxx_chip *chip = ds->priv; int err; mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_bridge_map(chip, br); + err = mv88e6xxx_bridge_map(chip, bridge); if (err) goto unlock; @@ -2463,6 +2480,14 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, if (err) goto unlock; + if (mv88e6xxx_has_pvt(chip)) { + err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num); + if (err) + goto unlock; + + *tx_fwd_offload = true; + } + unlock: mv88e6xxx_reg_unlock(chip); @@ -2470,14 +2495,18 @@ unlock: } static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct mv88e6xxx_chip *chip = ds->priv; int err; mv88e6xxx_reg_lock(chip); - if (mv88e6xxx_bridge_map(chip, br) || + if (bridge.tx_fwd_offload && + mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num)) + dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n"); + + if (mv88e6xxx_bridge_map(chip, bridge) || mv88e6xxx_port_vlan_map(chip, port)) dev_err(ds->dev, "failed to remap in-chip Port VLAN\n"); @@ -2492,7 +2521,7 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int tree_index, int sw_index, - int port, struct net_device *br) + int port, struct dsa_bridge bridge) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -2502,6 +2531,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_pvt_map(chip, sw_index, port); + err = err ? 
: mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num); mv88e6xxx_reg_unlock(chip); return err; @@ -2509,7 +2539,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int tree_index, int sw_index, - int port, struct net_device *br) + int port, struct dsa_bridge bridge) { struct mv88e6xxx_chip *chip = ds->priv; @@ -2517,49 +2547,12 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, return; mv88e6xxx_reg_lock(chip); - if (mv88e6xxx_pvt_map(chip, sw_index, port)) + if (mv88e6xxx_pvt_map(chip, sw_index, port) || + mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num)) dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n"); mv88e6xxx_reg_unlock(chip); } -/* Treat the software bridge as a virtual single-port switch behind the - * CPU and map in the PVT. First dst->last_switch elements are taken by - * physical switches, so start from beyond that range. - */ -static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds, - int bridge_num) -{ - u8 dev = bridge_num + ds->dst->last_switch + 1; - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_pvt_map(chip, dev, 0); - mv88e6xxx_reg_unlock(chip); - - return err; -} - -static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) -{ - return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num); -} - -static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) -{ - int err; - - err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num); - if (err) { - dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n", - ERR_PTR(err)); - } -} - static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip) { if (chip->info->ops->reset) @@ -3186,8 +3179,8 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) * time. 
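A worked example of the mapping performed by mv88e6xxx_map_virtual_bridge_to_pvt() above, under assumed values: with two switches in the tree (indices 0 and 1, so dst->last_switch == 1), bridge number 1 lands on PVT device 1 + 1 = 2, and assuming MV88E6XXX_MAX_PVT_SWITCHES is 32, devices 2..31 remain free for 32 - 1 - 1 = 30 virtual bridges, the same arithmetic as the ds->max_num_bridges assignment in the next hunk. As a sketch:

static u8 foo_bridge_to_pvt_dev(unsigned int bridge_num,
                                unsigned int last_switch)
{
        /* bridge numbers start at 1, so the first virtual bridge
         * takes the first PVT device past the physical switches
         */
        return bridge_num + last_switch;
}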
*/ if (mv88e6xxx_has_pvt(chip)) - ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES - - ds->dst->last_switch - 1; + ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES - + ds->dst->last_switch - 1; mv88e6xxx_reg_lock(chip); @@ -6279,8 +6272,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .crosschip_lag_change = mv88e6xxx_crosschip_lag_change, .crosschip_lag_join = mv88e6xxx_crosschip_lag_join, .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave, - .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload, - .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload, }; static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 8f74ffc7a279..389f8a6ec0ab 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -100,10 +100,6 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, */ clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state); - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tstamp_enable = false; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 6ea003678798..2b05ead515cd 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 status, u16 lpa, + u16 ctrl, u16 status, u16 lpa, struct phylink_link_state *state) { + state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { - state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + /* The Speed and Duplex Resolved register is 1 if AN is enabled + * and complete, or if AN is disabled. So with disabled AN we + * still get here on link up. But we want to set an_complete + * only if AN was enabled, thus we look at BMCR_ANENABLE. + * (According to 802.3-2008 section 22.2.4.2.10, we should be + * able to get this same value from BMSR_ANEGCAPABLE, but tests + * show that these Marvell PHYs don't conform to this part of + * the specification - BMSR_ANEGCAPABLE is simply always 1.) + */ + state->an_complete = !!(ctrl & BMCR_ANENABLE); state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ? DUPLEX_FULL : DUPLEX_HALF; @@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, dev_err(chip->dev, "invalid PHY speed\n"); return -EINVAL; } + } else if (state->link && + state->interface != PHY_INTERFACE_MODE_SGMII) { + /* If Speed and Duplex Resolved register is 0 and link is up, it + * means that AN was enabled, but the link partner had it disabled + * and the PHY invoked the Auto-Negotiation Bypass feature and + * linked anyway. 
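When the resolved bit is clear but the link is up, the new else-if branch above amounts to a fixed mapping, sketched here with a hypothetical helper name:

static void foo_fill_bypassed_link(struct phylink_link_state *state)
{
        /* Nothing was negotiated: the partner had AN disabled and the
         * PHY linked via the AN bypass feature, so duplex is full and
         * the speed is dictated by the configured interface mode.
         */
        state->duplex = DUPLEX_FULL;
        state->speed = state->interface == PHY_INTERFACE_MODE_2500BASEX ?
                       SPEED_2500 : SPEED_1000;
}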
+ */ + state->duplex = DUPLEX_FULL; + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + else + state->speed = SPEED_1000; } else { state->link = false; } @@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status; + u16 lpa, status, ctrl; int err; + err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl); + if (err) { + dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + return err; + } + err = mv88e6352_serdes_read(chip, 0x11, &status); if (err) { dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); @@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, @@ -801,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { u8 cmode = chip->ports[port].cmode; - int err = 0; + int err; switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: @@ -813,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, case MV88E6XXX_PORT_STS_CMODE_RXAUI: err = mv88e6390_serdes_power_10g(chip, lane, up); break; + default: + err = -EINVAL; + break; } if (!err && up) @@ -883,10 +915,17 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status; + u16 lpa, status, ctrl; int err; err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_BMCR, &ctrl); + if (err) { + dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + return err; + } + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, MV88E6390_SGMII_PHY_STATUS, &status); if (err) { dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); @@ -900,7 +939,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, @@ -1271,9 +1310,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) } } -static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) +static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane, + bool on) { - u16 reg, pcs; + u16 reg; + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_CTRL1, &reg); + if (err) + return err; + + if (on) + reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN | + MV88E6393X_SERDES_CTRL1_RX_PDOWN); + else + reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN | + MV88E6393X_SERDES_CTRL1_RX_PDOWN; + + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_CTRL1, reg); +} + +static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg; int err; /* mv88e6393x family errata 4.6: @@ -1284,26 +1345,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) * It seems that after this workaround the SERDES is automatically * powered up (the bit is cleared), so 
power it down. */ - if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE || - lane == MV88E6393X_PORT10_LANE) { - err = mv88e6390_serdes_read(chip, lane, - MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, &reg); - if (err) - return err; + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &reg); + if (err) + return err; - reg &= ~MV88E6393X_SERDES_POC_PDOWN; - reg |= MV88E6393X_SERDES_POC_RESET; + reg &= ~MV88E6393X_SERDES_POC_PDOWN; + reg |= MV88E6393X_SERDES_POC_RESET; - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, reg); - if (err) - return err; + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, reg); + if (err) + return err; - err = mv88e6390_serdes_power_sgmii(chip, lane, false); - if (err) - return err; - } + err = mv88e6390_serdes_power_sgmii(chip, lane, false); + if (err) + return err; + + return mv88e6393x_serdes_power_lane(chip, lane, false); +} + +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE); + if (err) + return err; + + err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE); + if (err) + return err; + + return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE); +} + +static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg, pcs; + int err; /* mv88e6393x family errata 4.8: * When a SERDES port is operating in 1000BASE-X or SGMII mode link may @@ -1334,38 +1414,152 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) MV88E6393X_ERRATA_4_8_REG, reg); } -int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane, + u8 cmode) +{ + static const struct { + u16 dev, reg, val, mask; + } fixes[] = { + { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff }, + { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff }, + { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff }, + { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f }, + { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 }, + { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff }, + { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC, + MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET }, + }; + int err, i; + u16 reg; + + /* mv88e6393x family errata 5.2: + * For optimal signal integrity the following sequence should be applied + * to SERDES operating in 10G mode. These registers only apply to 10G + * operation and have no effect on other speeds. + */ + if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER) + return 0; + + for (i = 0; i < ARRAY_SIZE(fixes); ++i) { + err = mv88e6390_serdes_read(chip, lane, fixes[i].dev, + fixes[i].reg, &reg); + if (err) + return err; + + reg &= ~fixes[i].mask; + reg |= fixes[i].val; + + err = mv88e6390_serdes_write(chip, lane, fixes[i].dev, + fixes[i].reg, reg); + if (err) + return err; + } + + return 0; +} + +static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip, + int lane, u8 cmode, bool on) { + u16 reg; int err; - err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE); + if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX) + return 0; + + /* Inband AN is broken on Amethyst in 2500base-x mode when set by + * standard mechanism (via cmode). + * We can get around this by configuring the PCS mode to 1000base-x + * and then writing value 0x58 to register 1e.8000. (This must be done + * while SerDes receiver and transmitter are disabled, which is when + * this function is called.) 
+ * It seems that when we do this configuration to 2500base-x mode (by + * changing PCS mode to 1000base-x and frequency to 3.125 GHz from + * 1.25 GHz) and then configure to sgmii or 1000base-x, the device + * thinks that it already has SerDes at 1.25 GHz and does not change + * the 1e.8000 register, leaving SerDes at 3.125 GHz. + * To avoid this, change PCS mode back to 2500base-x when disabling + * SerDes from 2500base-x mode. + */ + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &reg); + if (err) + return err; + + reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN); + if (on) + reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX | + MV88E6393X_SERDES_POC_AN; + else + reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX; + reg |= MV88E6393X_SERDES_POC_RESET; + + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, reg); if (err) return err; - err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE); + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58); if (err) return err; - return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE); + return 0; } int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool on) { u8 cmode = chip->ports[port].cmode; + int err; if (port != 0 && port != 9 && port != 10) return -EOPNOTSUPP; + if (on) { + err = mv88e6393x_serdes_erratum_4_8(chip, lane); + if (err) + return err; + + err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode); + if (err) + return err; + + err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, + true); + if (err) + return err; + + err = mv88e6393x_serdes_power_lane(chip, lane, true); + if (err) + return err; + } + switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: case MV88E6XXX_PORT_STS_CMODE_1000BASEX: case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_power_sgmii(chip, lane, on); + err = mv88e6390_serdes_power_sgmii(chip, lane, on); + break; case MV88E6393X_PORT_STS_CMODE_5GBASER: case MV88E6393X_PORT_STS_CMODE_10GBASER: - return mv88e6390_serdes_power_10g(chip, lane, on); + err = mv88e6390_serdes_power_10g(chip, lane, on); + break; + default: + err = -EINVAL; + break; } - return 0; + if (err) + return err; + + if (!on) { + err = mv88e6393x_serdes_power_lane(chip, lane, false); + if (err) + return err; + + err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, + false); + } + + return err; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index cbb3ba30caea..8dd8ed225b45 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -93,6 +93,10 @@ #define MV88E6393X_SERDES_POC_PCS_MASK 0x0007 #define MV88E6393X_SERDES_POC_RESET BIT(15) #define MV88E6393X_SERDES_POC_PDOWN BIT(5) +#define MV88E6393X_SERDES_POC_AN BIT(3) +#define MV88E6393X_SERDES_CTRL1 0xf003 +#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9) +#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8) #define MV88E6393X_ERRATA_4_8_REG 0xF074 #define MV88E6393X_ERRATA_4_8_BIT BIT(14) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 0e102caddb73..f4fc403fbc1e 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -298,8 +298,11 @@ static int felix_setup_mmio_filtering(struct felix *felix) } } - if (cpu < 0) + if (cpu < 0) { + kfree(tagging_rule); + kfree(redirect_rule); return -EINVAL; + } tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE; *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588); @@ 
-706,21 +709,21 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port, } static int felix_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, bool *tx_fwd_offload) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_join(ocelot, port, br); + ocelot_port_bridge_join(ocelot, port, bridge.dev); return 0; } static void felix_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_leave(ocelot, port, br); + ocelot_port_bridge_leave(ocelot, port, bridge.dev); } static int felix_lag_join(struct dsa_switch *ds, int port, @@ -828,7 +831,7 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, struct felix *felix = ocelot_to_felix(ocelot); struct dsa_port *dp = dsa_to_port(ds, port); - if (felix->pcs[port]) + if (felix->pcs && felix->pcs[port]) phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs); } @@ -1028,7 +1031,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) res.start += felix->switch_base; res.end += felix->switch_base; - target = ocelot_regmap_init(ocelot, &res); + target = felix->info->init_regmap(ocelot, &res); if (IS_ERR(target)) { dev_err(ocelot->dev, "Failed to map device memory space\n"); @@ -1065,7 +1068,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) res.start += felix->switch_base; res.end += felix->switch_base; - target = ocelot_regmap_init(ocelot, &res); + target = felix->info->init_regmap(ocelot, &res); if (IS_ERR(target)) { dev_err(ocelot->dev, "Failed to map memory space for port %d\n", @@ -1152,38 +1155,22 @@ static void felix_port_deferred_xmit(struct kthread_work *work) kfree(xmit_work); } -static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port) +static int felix_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) { - struct dsa_port *dp = dsa_to_port(ds, port); - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - struct felix_port *felix_port; + struct ocelot_8021q_tagger_data *tagger_data; - if (!dsa_port_is_user(dp)) + switch (proto) { + case DSA_TAG_PROTO_OCELOT_8021Q: + tagger_data = ocelot_8021q_tagger_data(ds); + tagger_data->xmit_work_fn = felix_port_deferred_xmit; return 0; - - felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL); - if (!felix_port) - return -ENOMEM; - - felix_port->xmit_worker = felix->xmit_worker; - felix_port->xmit_work_fn = felix_port_deferred_xmit; - - dp->priv = felix_port; - - return 0; -} - -static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port) -{ - struct dsa_port *dp = dsa_to_port(ds, port); - struct felix_port *felix_port = dp->priv; - - if (!felix_port) - return; - - dp->priv = NULL; - kfree(felix_port); + case DSA_TAG_PROTO_OCELOT: + case DSA_TAG_PROTO_SEVILLE: + return 0; + default: + return -EPROTONOSUPPORT; + } } /* Hardware initialization done here so that we can allocate structures with @@ -1214,12 +1201,6 @@ static int felix_setup(struct dsa_switch *ds) } } - felix->xmit_worker = kthread_create_worker(0, "felix_xmit"); - if (IS_ERR(felix->xmit_worker)) { - err = PTR_ERR(felix->xmit_worker); - goto out_deinit_timestamp; - } - for (port = 0; port < ds->num_ports; port++) { if (dsa_is_unused_port(ds, port)) continue; @@ -1230,14 +1211,6 @@ static int felix_setup(struct dsa_switch *ds) * bits of vlan tag. 
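felix_connect_tag_protocol() above is an instance of the new connect_tag_protocol operation: instead of allocating per-port tagger state, the driver publishes its deferred-xmit handler once, into data shared with the tagger. Condensed to its core (names as in the hunk; the other accepted protocols and error handling trimmed):

static int foo_connect_tag_protocol(struct dsa_switch *ds,
                                    enum dsa_tag_protocol proto)
{
        struct ocelot_8021q_tagger_data *td;

        if (proto != DSA_TAG_PROTO_OCELOT_8021Q)
                return -EPROTONOSUPPORT;        /* tagger we cannot serve */

        /* hand the tagger our TX deferral work function */
        td = ocelot_8021q_tagger_data(ds);
        td->xmit_work_fn = felix_port_deferred_xmit;
        return 0;
}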
*/ felix_port_qos_map_init(ocelot, port); - - err = felix_port_setup_tagger_data(ds, port); - if (err) { - dev_err(ds->dev, - "port %d failed to set up tagger data: %pe\n", - port, ERR_PTR(err)); - goto out_deinit_ports; - } } err = ocelot_devlink_sb_register(ocelot); @@ -1265,13 +1238,9 @@ out_deinit_ports: if (dsa_is_unused_port(ds, port)) continue; - felix_port_teardown_tagger_data(ds, port); ocelot_deinit_port(ocelot, port); } - kthread_destroy_worker(felix->xmit_worker); - -out_deinit_timestamp: ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); @@ -1300,12 +1269,9 @@ static void felix_teardown(struct dsa_switch *ds) if (dsa_is_unused_port(ds, port)) continue; - felix_port_teardown_tagger_data(ds, port); ocelot_deinit_port(ocelot, port); } - kthread_destroy_worker(felix->xmit_worker); - ocelot_devlink_sb_unregister(ocelot); ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); @@ -1645,6 +1611,7 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port, const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .change_tag_protocol = felix_change_tag_protocol, + .connect_tag_protocol = felix_connect_tag_protocol, .setup = felix_setup, .teardown = felix_teardown, .set_ageing_time = felix_set_ageing_time, diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index dfe08dddd262..515bddc012c0 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -25,8 +25,6 @@ struct felix_info { u16 vcap_pol_max; u16 vcap_pol_base2; u16 vcap_pol_max2; - int switch_pci_bar; - int imdio_pci_bar; const struct ptp_clock_info *ptp_caps; /* Some Ocelot switches are integrated into the SoC without the @@ -52,6 +50,8 @@ struct felix_info { enum tc_setup_type type, void *type_data); void (*port_sched_speed_set)(struct ocelot *ocelot, int port, u32 speed); + struct regmap *(*init_regmap)(struct ocelot *ocelot, + struct resource *res); }; extern const struct dsa_switch_ops felix_switch_ops; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 9add86eda7e3..110d6c403bdd 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -21,6 +21,8 @@ #define VSC9959_TAS_GCL_ENTRY_MAX 63 #define VSC9959_VCAP_POLICER_BASE 63 #define VSC9959_VCAP_POLICER_MAX 383 +#define VSC9959_SWITCH_PCI_BAR 4 +#define VSC9959_IMDIO_PCI_BAR 0 static const u32 vsc9959_ana_regmap[] = { REG(ANA_ADVLEARN, 0x0089a0), @@ -1503,12 +1505,10 @@ static int vsc9959_stream_table_add(struct ocelot *ocelot, struct felix_stream *stream_entry; int ret; - stream_entry = kzalloc(sizeof(*stream_entry), GFP_KERNEL); + stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL); if (!stream_entry) return -ENOMEM; - memcpy(stream_entry, stream, sizeof(*stream_entry)); - if (!stream->dummy) { ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack); if (ret) { @@ -1624,11 +1624,10 @@ static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot, struct felix_stream_filter *sfi_entry; int ret; - sfi_entry = kzalloc(sizeof(*sfi_entry), GFP_KERNEL); + sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL); if (!sfi_entry) return -ENOMEM; - memcpy(sfi_entry, sfi, sizeof(*sfi_entry)); refcount_set(&sfi_entry->refcount, 1); ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry); @@ -2230,8 +2229,6 @@ static const struct felix_info felix_info_vsc9959 = { .num_mact_rows = 2048, .num_ports = 6, .num_tx_queues = OCELOT_NUM_TC, - .switch_pci_bar = 4, - .imdio_pci_bar = 0, .quirk_no_xtr_irq = true, 
.ptp_caps = &vsc9959_ptp_caps, .mdio_bus_alloc = vsc9959_mdio_bus_alloc, @@ -2240,6 +2237,7 @@ static const struct felix_info felix_info_vsc9959 = { .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, + .init_regmap = ocelot_regmap_init, }; static irqreturn_t felix_irq_handler(int irq, void *data) @@ -2290,10 +2288,8 @@ static int felix_pci_probe(struct pci_dev *pdev, ocelot->dev = &pdev->dev; ocelot->num_flooding_pgids = OCELOT_NUM_TC; felix->info = &felix_info_vsc9959; - felix->switch_base = pci_resource_start(pdev, - felix->info->switch_pci_bar); - felix->imdio_base = pci_resource_start(pdev, - felix->info->imdio_pci_bar); + felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR); + felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR); pci_set_master(pdev); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index b9be889016ce..e110550e3507 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -1104,6 +1104,7 @@ static const struct felix_info seville_info_vsc9953 = { .mdio_bus_free = vsc9953_mdio_bus_free, .phylink_validate = vsc9953_phylink_validate, .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode, + .init_regmap = ocelot_regmap_init, }; static int seville_probe(struct platform_device *pdev) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 96a7fbf8700c..039694518788 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1810,8 +1810,9 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); } -static int -qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) +static int qca8k_port_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int port_mask, cpu_port; @@ -1823,7 +1824,7 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) for (i = 0; i < QCA8K_NUM_PORTS; i++) { if (dsa_is_cpu_port(ds, i)) continue; - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Add this port to the portvlan mask of the other ports * in the bridge @@ -1844,8 +1845,8 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) return ret; } -static void -qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) +static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int cpu_port, i; @@ -1855,7 +1856,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) for (i = 0; i < QCA8K_NUM_PORTS; i++) { if (dsa_is_cpu_port(ds, i)) continue; - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Remove this port from the portvlan mask of the other ports * in the bridge diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c index bb65576ebf3c..2ac68c867636 100644 --- a/drivers/net/dsa/rtl8365mb.c +++ b/drivers/net/dsa/rtl8365mb.c @@ -107,6 +107,7 @@ #define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112 /* Family-specific data and limits */ +#define RTL8365MB_PHYADDRMAX 7 #define RTL8365MB_NUM_PHYREGS 32 #define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 
1) #define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1) @@ -176,7 +177,7 @@ #define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0) -#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5) +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5) #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8) #define RTL8365MB_PHY_BASE 0x2000 #define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03 @@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum) u16 val; int ret; + if (phy > RTL8365MB_PHYADDRMAX) + return -EINVAL; + if (regnum > RTL8365MB_PHYREGMAX) return -EINVAL; @@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum, u32 ocp_addr; int ret; + if (phy > RTL8365MB_PHYADDRMAX) + return -EINVAL; + if (regnum > RTL8365MB_PHYREGMAX) return -EINVAL; diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index 03deacd83e61..ecc19bd5115f 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -1186,7 +1186,8 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port) static int rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct realtek_smi *smi = ds->priv; unsigned int port_bitmap = 0; @@ -1198,7 +1199,7 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, if (i == port) continue; /* Not on this bridge */ - if (dsa_to_port(ds, i)->bridge_dev != bridge) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Join this port to each other port on the bridge */ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), @@ -1218,7 +1219,7 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, static void rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { struct realtek_smi *smi = ds->priv; unsigned int port_bitmap = 0; @@ -1230,7 +1231,7 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, if (i == port) continue; /* Not on this bridge */ - if (dsa_to_port(ds, i)->bridge_dev != bridge) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Remove this port from any other port on the bridge */ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 21dba16af097..9ba2ec2b966d 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -249,6 +249,7 @@ struct sja1105_private { bool fixed_link[SJA1105_MAX_NUM_PORTS]; unsigned long ucast_egress_floods; unsigned long bcast_egress_floods; + unsigned long hwts_tx_en; const struct sja1105_info *info; size_t max_xfer_len; struct spi_device *spidev; @@ -256,11 +257,13 @@ struct sja1105_private { u16 bridge_pvid[SJA1105_MAX_NUM_PORTS]; u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS]; struct sja1105_flow_block flow_block; - struct sja1105_port ports[SJA1105_MAX_NUM_PORTS]; /* Serializes transmission of management frames so that * the switch doesn't confuse them with one another. 
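The ts_id counter and ts_id_lock spinlock added to sja1105_private just below carry the PTP two-step TX timestamp ID. The usual consumer pattern looks like this sketch (hypothetical foo_ name; the real user lives in the PTP code, outside this excerpt):

static u8 foo_next_ts_id(struct sja1105_private *priv)
{
        u8 ts_id;

        spin_lock(&priv->ts_id_lock);
        ts_id = priv->ts_id;
        priv->ts_id++;  /* u8 arithmetic wraps around on its own */
        spin_unlock(&priv->ts_id_lock);

        return ts_id;
}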
*/ struct mutex mgmt_lock; + /* PTP two-step TX timestamp ID, and its serialization lock */ + spinlock_t ts_id_lock; + u8 ts_id; /* Serializes access to the dynamic config interface */ struct mutex dynamic_config_lock; struct devlink_region **regions; @@ -269,7 +272,6 @@ struct sja1105_private { struct mii_bus *mdio_base_tx; struct mii_bus *mdio_pcs; struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS]; - struct sja1105_tagger_data tagger_data; struct sja1105_ptp_data ptp_data; struct sja1105_tas_data tas_data; }; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index c343effe2e96..b513713be610 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -118,13 +118,14 @@ static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) static int sja1105_commit_pvid(struct dsa_switch *ds, int port) { struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *br = dsa_port_bridge_dev_get(dp); struct sja1105_private *priv = ds->priv; struct sja1105_vlan_lookup_entry *vlan; bool drop_untagged = false; int match, rc; u16 pvid; - if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev)) + if (br && br_vlan_enabled(br)) pvid = priv->bridge_pvid[port]; else pvid = priv->tag_8021q_pvid[port]; @@ -1979,7 +1980,7 @@ static int sja1105_manage_flood_domains(struct sja1105_private *priv) } static int sja1105_bridge_member(struct dsa_switch *ds, int port, - struct net_device *br, bool member) + struct dsa_bridge bridge, bool member) { struct sja1105_l2_forwarding_entry *l2_fwd; struct sja1105_private *priv = ds->priv; @@ -2004,7 +2005,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port, */ if (i == port) continue; - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; sja1105_port_allow_traffic(l2_fwd, i, port, member); sja1105_port_allow_traffic(l2_fwd, port, i, member); @@ -2073,15 +2074,31 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, } static int sja1105_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { - return sja1105_bridge_member(ds, port, br, true); + int rc; + + rc = sja1105_bridge_member(ds, port, bridge, true); + if (rc) + return rc; + + rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge); + if (rc) { + sja1105_bridge_member(ds, port, bridge, false); + return rc; + } + + *tx_fwd_offload = true; + + return 0; } static void sja1105_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { - sja1105_bridge_member(ds, port, br, false); + dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge); + sja1105_bridge_member(ds, port, bridge, false); } #define BYTES_PER_KBIT (1000LL / 8) @@ -2587,8 +2604,9 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port, if (netif_is_bridge_master(upper)) { list_for_each_entry(dp, &dst->ports, list) { - if (dp->bridge_dev && dp->bridge_dev != upper && - br_vlan_enabled(dp->bridge_dev)) { + struct net_device *br = dsa_port_bridge_dev_get(dp); + + if (br && br != upper && br_vlan_enabled(br)) { NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported"); return -EBUSY; @@ -2599,18 +2617,6 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port, return 0; } -static void sja1105_port_disable(struct dsa_switch *ds, int port) -{ - struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = 
&priv->ports[port]; - - if (!dsa_is_user_port(ds, port)) - return; - - kthread_cancel_work_sync(&sp->xmit_work); - skb_queue_purge(&sp->xmit_queue); -} - static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, struct sk_buff *skb, bool takets) { @@ -2669,10 +2675,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, return NETDEV_TX_OK; } -#define work_to_port(work) \ - container_of((work), struct sja1105_port, xmit_work) -#define tagger_to_sja1105(t) \ - container_of((t), struct sja1105_private, tagger_data) +#define work_to_xmit_work(w) \ + container_of((w), struct sja1105_deferred_xmit_work, work) /* Deferred work is unfortunately necessary because setting up the management * route cannot be done from atomit context (SPI transfer takes a sleepable @@ -2680,25 +2684,41 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, */ static void sja1105_port_deferred_xmit(struct kthread_work *work) { - struct sja1105_port *sp = work_to_port(work); - struct sja1105_tagger_data *tagger_data = sp->data; - struct sja1105_private *priv = tagger_to_sja1105(tagger_data); - int port = sp - priv->ports; - struct sk_buff *skb; + struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work); + struct sk_buff *clone, *skb = xmit_work->skb; + struct dsa_switch *ds = xmit_work->dp->ds; + struct sja1105_private *priv = ds->priv; + int port = xmit_work->dp->index; - while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) { - struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; + clone = SJA1105_SKB_CB(skb)->clone; - mutex_lock(&priv->mgmt_lock); + mutex_lock(&priv->mgmt_lock); - sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone); + sja1105_mgmt_xmit(ds, port, 0, skb, !!clone); - /* The clone, if there, was made by dsa_skb_tx_timestamp */ - if (clone) - sja1105_ptp_txtstamp_skb(priv->ds, port, clone); + /* The clone, if there, was made by dsa_skb_tx_timestamp */ + if (clone) + sja1105_ptp_txtstamp_skb(ds, port, clone); - mutex_unlock(&priv->mgmt_lock); - } + mutex_unlock(&priv->mgmt_lock); + + kfree(xmit_work); +} + +static int sja1105_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_tagger_data *tagger_data; + + if (proto != priv->info->tag_proto) + return -EPROTONOSUPPORT; + + tagger_data = sja1105_tagger_data(ds); + tagger_data->xmit_work_fn = sja1105_port_deferred_xmit; + tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp; + + return 0; } /* The MAXAGE setting belongs to the L2 Forwarding Parameters table, @@ -3001,58 +3021,6 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port, return 0; } -static void sja1105_teardown_ports(struct sja1105_private *priv) -{ - struct dsa_switch *ds = priv->ds; - int port; - - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - - if (sp->xmit_worker) - kthread_destroy_worker(sp->xmit_worker); - } -} - -static int sja1105_setup_ports(struct sja1105_private *priv) -{ - struct sja1105_tagger_data *tagger_data = &priv->tagger_data; - struct dsa_switch *ds = priv->ds; - int port, rc; - - /* Connections between dsa_port and sja1105_port */ - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - struct dsa_port *dp = dsa_to_port(ds, port); - struct kthread_worker *worker; - struct net_device *slave; - - if (!dsa_port_is_user(dp)) - continue; - - dp->priv = sp; - sp->data = tagger_data; - slave = dp->slave; - 
kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); - worker = kthread_create_worker(0, "%s_xmit", slave->name); - if (IS_ERR(worker)) { - rc = PTR_ERR(worker); - dev_err(ds->dev, - "failed to create deferred xmit thread: %d\n", - rc); - goto out_destroy_workers; - } - sp->xmit_worker = worker; - skb_queue_head_init(&sp->xmit_queue); - } - - return 0; - -out_destroy_workers: - sja1105_teardown_ports(priv); - return rc; -} - /* The programming model for the SJA1105 switch is "all-at-once" via static * configuration tables. Some of these can be dynamically modified at runtime, * but not the xMII mode parameters table. @@ -3098,10 +3066,6 @@ static int sja1105_setup(struct dsa_switch *ds) } } - rc = sja1105_setup_ports(priv); - if (rc) - goto out_static_config_free; - sja1105_tas_setup(ds); sja1105_flower_setup(ds); @@ -3139,7 +3103,7 @@ static int sja1105_setup(struct dsa_switch *ds) ds->vlan_filtering_is_global = true; ds->untag_bridge_pvid = true; /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */ - ds->num_fwd_offloading_bridges = 7; + ds->max_num_bridges = 7; /* Advertise the 8 egress queues */ ds->num_tx_queues = SJA1105_NUM_TC; @@ -3158,7 +3122,6 @@ out_ptp_clock_unregister: out_flower_teardown: sja1105_flower_teardown(ds); sja1105_tas_teardown(ds); - sja1105_teardown_ports(priv); out_static_config_free: sja1105_static_config_free(&priv->static_config); @@ -3178,12 +3141,12 @@ static void sja1105_teardown(struct dsa_switch *ds) sja1105_ptp_clock_unregister(ds); sja1105_flower_teardown(ds); sja1105_tas_teardown(ds); - sja1105_teardown_ports(priv); sja1105_static_config_free(&priv->static_config); } static const struct dsa_switch_ops sja1105_switch_ops = { .get_tag_protocol = sja1105_get_tag_protocol, + .connect_tag_protocol = sja1105_connect_tag_protocol, .setup = sja1105_setup, .teardown = sja1105_teardown, .set_ageing_time = sja1105_set_ageing_time, @@ -3197,7 +3160,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .get_ethtool_stats = sja1105_get_ethtool_stats, .get_sset_count = sja1105_get_sset_count, .get_ts_info = sja1105_get_ts_info, - .port_disable = sja1105_port_disable, .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, .port_fdb_del = sja1105_fdb_del, @@ -3228,8 +3190,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add, .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del, .port_prechangeupper = sja1105_prechangeupper, - .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload, - .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload, }; static const struct of_device_id sja1105_dt_ids[]; @@ -3367,6 +3327,7 @@ static int sja1105_probe(struct spi_device *spi) mutex_init(&priv->ptp_data.lock); mutex_init(&priv->dynamic_config_lock); mutex_init(&priv->mgmt_lock); + spin_lock_init(&priv->ts_id_lock); rc = sja1105_parse_dt(priv); if (rc < 0) { diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index 54396992a919..be3068a935af 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -58,13 +58,12 @@ enum sja1105_ptp_clk_mode { #define ptp_data_to_sja1105(d) \ container_of((d), struct sja1105_private, ptp_data) -/* Must be called only with priv->tagger_data.state bit - * SJA1105_HWTS_RX_EN cleared +/* Must be called only while the RX timestamping state of the tagger + * is turned off */ static int sja1105_change_rxtstamping(struct sja1105_private *priv, bool on) { - 
struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; struct sja1105_general_params_entry *general_params; struct sja1105_table *table; @@ -74,13 +73,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv, general_params->send_meta1 = on; general_params->send_meta0 = on; - /* Initialize the meta state machine to a known state */ - if (priv->tagger_data.stampable_skb) { - kfree_skb(priv->tagger_data.stampable_skb); - priv->tagger_data.stampable_skb = NULL; - } ptp_cancel_worker_sync(ptp_data->clock); - skb_queue_purge(&tagger_data->skb_txtstamp_queue); + skb_queue_purge(&ptp_data->skb_txtstamp_queue); skb_queue_purge(&ptp_data->skb_rxtstamp_queue); return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING); @@ -88,6 +82,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv, int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) { + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds); struct sja1105_private *priv = ds->priv; struct hwtstamp_config config; bool rx_on; @@ -98,10 +93,10 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) switch (config.tx_type) { case HWTSTAMP_TX_OFF: - priv->ports[port].hwts_tx_en = false; + priv->hwts_tx_en &= ~BIT(port); break; case HWTSTAMP_TX_ON: - priv->ports[port].hwts_tx_en = true; + priv->hwts_tx_en |= BIT(port); break; default: return -ERANGE; @@ -116,8 +111,8 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) break; } - if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) { - clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); + if (rx_on != tagger_data->rxtstamp_get_state(ds)) { + tagger_data->rxtstamp_set_state(ds, false); rc = sja1105_change_rxtstamping(priv, rx_on); if (rc < 0) { @@ -126,7 +121,7 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) return rc; } if (rx_on) - set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); + tagger_data->rxtstamp_set_state(ds, true); } if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) @@ -136,15 +131,16 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr) { + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds); struct sja1105_private *priv = ds->priv; struct hwtstamp_config config; config.flags = 0; - if (priv->ports[port].hwts_tx_en) + if (priv->hwts_tx_en & BIT(port)) config.tx_type = HWTSTAMP_TX_ON; else config.tx_type = HWTSTAMP_TX_OFF; - if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) + if (tagger_data->rxtstamp_get_state(ds)) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; else config.rx_filter = HWTSTAMP_FILTER_NONE; @@ -417,10 +413,11 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp) bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds); struct sja1105_private *priv = ds->priv; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; - if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) + if (!tagger_data->rxtstamp_get_state(ds)) return false; /* We need to read the full PTP clock to reconstruct the Rx @@ -453,6 +450,39 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, return priv->info->rxtstamp(ds, port, skb); } +void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, + enum 
sja1110_meta_tstamp dir, u64 tstamp) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shwt = {0}; + + /* We don't care about RX timestamps on the CPU port */ + if (dir == SJA1110_META_TSTAMP_RX) + return; + + spin_lock(&ptp_data->skb_txtstamp_queue.lock); + + skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) { + if (SJA1105_SKB_CB(skb)->ts_id != ts_id) + continue; + + __skb_unlink(skb, &ptp_data->skb_txtstamp_queue); + skb_match = skb; + + break; + } + + spin_unlock(&ptp_data->skb_txtstamp_queue.lock); + + if (WARN_ON(!skb_match)) + return; + + shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); + skb_complete_tx_timestamp(skb_match, &shwt); +} + /* In addition to cloning the skb which is done by the common * sja1105_port_txtstamp, we need to generate a timestamp ID and save the * packet to the TX timestamping queue. @@ -461,22 +491,22 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = &priv->ports[port]; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; u8 ts_id; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->ts_id_lock); - ts_id = sp->data->ts_id; + ts_id = priv->ts_id; /* Deal automatically with 8-bit wraparound */ - sp->data->ts_id++; + priv->ts_id++; SJA1105_SKB_CB(clone)->ts_id = ts_id; - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->ts_id_lock); - skb_queue_tail(&sp->data->skb_txtstamp_queue, clone); + skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone); } /* Called from dsa_skb_tx_timestamp. 
This callback is just to clone @@ -486,10 +516,9 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = &priv->ports[port]; struct sk_buff *clone; - if (!sp->hwts_tx_en) + if (!(priv->hwts_tx_en & BIT(port))) return; clone = skb_clone_sk(skb); @@ -896,7 +925,6 @@ static struct ptp_pin_desc sja1105_ptp_pin = { int sja1105_ptp_clock_register(struct dsa_switch *ds) { struct sja1105_private *priv = ds->priv; - struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; ptp_data->caps = (struct ptp_clock_info) { @@ -919,8 +947,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds) /* Only used on SJA1105 */ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue); /* Only used on SJA1110 */ - skb_queue_head_init(&tagger_data->skb_txtstamp_queue); - spin_lock_init(&tagger_data->meta_lock); + skb_queue_head_init(&ptp_data->skb_txtstamp_queue); ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev); if (IS_ERR_OR_NULL(ptp_data->clock)) @@ -937,7 +964,6 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds) void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { struct sja1105_private *priv = ds->priv; - struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; if (IS_ERR_OR_NULL(ptp_data->clock)) @@ -945,7 +971,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds) del_timer_sync(&ptp_data->extts_timer); ptp_cancel_worker_sync(ptp_data->clock); - skb_queue_purge(&tagger_data->skb_txtstamp_queue); + skb_queue_purge(&ptp_data->skb_txtstamp_queue); skb_queue_purge(&ptp_data->skb_rxtstamp_queue); ptp_clock_unregister(ptp_data->clock); ptp_data->clock = NULL; diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index 3ae6b9fdd492..416461ee95d2 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -8,6 +8,21 @@ #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) +/* Timestamps are in units of 8 ns clock ticks (equivalent to + * a fixed 125 MHz clock).
+ */ +#define SJA1105_TICK_NS 8 + +static inline s64 ns_to_sja1105_ticks(s64 ns) +{ + return ns / SJA1105_TICK_NS; +} + +static inline s64 sja1105_ticks_to_ns(s64 ticks) +{ + return ticks * SJA1105_TICK_NS; +} + /* Calculate the first base_time in the future that satisfies this * relationship: * @@ -62,6 +77,10 @@ struct sja1105_ptp_data { struct timer_list extts_timer; /* Used only on SJA1105 to reconstruct partial timestamps */ struct sk_buff_head skb_rxtstamp_queue; + /* Used on SJA1110 where meta frames are generated only for + * 2-step TX timestamps + */ + struct sk_buff_head skb_txtstamp_queue; struct ptp_clock_info caps; struct ptp_clock *clock; struct sja1105_ptp_cmd cmd; @@ -112,6 +131,9 @@ bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); +void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, + enum sja1110_meta_tstamp dir, u64 tstamp); + #else struct sja1105_ptp_cmd; @@ -178,6 +200,8 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds, #define sja1110_rxtstamp NULL #define sja1110_txtstamp NULL +#define sja1110_process_meta_tstamp NULL + #endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ #endif /* _SJA1105_PTP_H */ diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index 4c18f619ec02..ae55167ce0a6 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -1122,9 +1122,6 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc) vsc->gc.ngpio = 4; vsc->gc.owner = THIS_MODULE; vsc->gc.parent = vsc->dev; -#if IS_ENABLED(CONFIG_OF_GPIO) - vsc->gc.of_node = vsc->dev->of_node; -#endif vsc->gc.base = -1; vsc->gc.get = vsc73xx_gpio_get; vsc->gc.set = vsc73xx_gpio_set; diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 910fcb3b252b..35fa19ddaf19 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -501,7 +501,7 @@ static void xrs700x_mac_link_up(struct dsa_switch *ds, int port, } static int xrs700x_bridge_common(struct dsa_switch *ds, int port, - struct net_device *bridge, bool join) + struct dsa_bridge bridge, bool join) { unsigned int i, cpu_mask = 0, mask = 0; struct xrs700x *priv = ds->priv; @@ -513,14 +513,14 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port, cpu_mask |= BIT(i); - if (dsa_to_port(ds, i)->bridge_dev == bridge) + if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; mask |= BIT(i); } for (i = 0; i < ds->num_ports; i++) { - if (dsa_to_port(ds, i)->bridge_dev != bridge) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* 1 = Disable forwarding to the port */ @@ -540,13 +540,13 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port, } static int xrs700x_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, bool *tx_fwd_offload) { return xrs700x_bridge_common(ds, port, bridge, true); } static void xrs700x_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { xrs700x_bridge_common(ds, port, bridge, false); } diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 8ef34901c2d8..1111d1f33865 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -225,7 +225,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) 
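The SJA1105_TICK_NS helpers added to sja1105_ptp.h above capture the fact that the switch timestamps in ticks of a fixed 125 MHz clock, i.e. 8 ns per tick. A standalone arithmetic check of the two conversions (plain C restating the inline helpers, outside the kernel):

#include <stdint.h>
#include <stdio.h>

#define SJA1105_TICK_NS 8	/* one tick of the 125 MHz clock */

static int64_t ns_to_sja1105_ticks(int64_t ns)
{
	return ns / SJA1105_TICK_NS;
}

static int64_t sja1105_ticks_to_ns(int64_t ticks)
{
	return ticks * SJA1105_TICK_NS;
}

int main(void)
{
	/* 1 ms = 1,000,000 ns = 125,000 ticks at 125 MHz */
	printf("%lld\n", (long long)ns_to_sja1105_ticks(1000000));	/* 125000 */
	printf("%lld\n", (long long)sja1105_ticks_to_ns(125000));	/* 1000000 */
	/* integer division truncates: sub-tick residue is lost */
	printf("%lld\n", (long long)ns_to_sja1105_ticks(15));		/* 1 */
	return 0;
}

This is also why sja1110_process_meta_tstamp() goes through sja1105_ticks_to_ns() before ns_to_ktime(): the hardware reports raw tick counts, while skb_complete_tx_timestamp() expects a nanosecond-based ktime_t.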
list_del(&slave->list); queue->num_slaves--; slave->dev->flags &= ~IFF_SLAVE; - dev_put(slave->dev); + dev_put_track(slave->dev, &slave->dev_tracker); kfree(slave); } @@ -399,7 +399,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) if (duplicate_slave) eql_kill_one_slave(queue, duplicate_slave); - dev_hold(slave->dev); + dev_hold_track(slave->dev, &slave->dev_tracker, GFP_ATOMIC); list_add(&slave->list, &queue->all_slaves); queue->num_slaves++; slave->dev->flags |= IFF_SLAVE; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 027cbacca1c9..db3ec4768159 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -183,6 +183,7 @@ source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/toshiba/Kconfig" source "drivers/net/ethernet/tundra/Kconfig" +source "drivers/net/ethernet/vertexcom/Kconfig" source "drivers/net/ethernet/via/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 33d30b619e00..8a87c1083d1d 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -93,6 +93,7 @@ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ +obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/ obj-$(CONFIG_NET_VENDOR_VIA) += via/ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 800ee022388f..cccf8a3ead5e 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -76,7 +76,6 @@ struct emac_board_info { void __iomem *membase; u32 msg_enable; struct net_device *ndev; - struct sk_buff *skb_last; u16 tx_fifo_stat; int emacrx_completed_flag; @@ -499,7 +498,6 @@ static void emac_rx(struct net_device *dev) struct sk_buff *skb; u8 *rdptr; bool good_packet; - static int rxlen_last; unsigned int reg_val; u32 rxhdr, rxstatus, rxcount, rxlen; @@ -514,22 +512,6 @@ static void emac_rx(struct net_device *dev) if (netif_msg_rx_status(db)) dev_dbg(db->dev, "RXCount: %x\n", rxcount); - if ((db->skb_last != NULL) && (rxlen_last > 0)) { - dev->stats.rx_bytes += rxlen_last; - - /* Pass to upper layer */ - db->skb_last->protocol = eth_type_trans(db->skb_last, - dev); - netif_rx(db->skb_last); - dev->stats.rx_packets++; - db->skb_last = NULL; - rxlen_last = 0; - - reg_val = readl(db->membase + EMAC_RX_CTL_REG); - reg_val &= ~EMAC_RX_CTL_DMA_EN; - writel(reg_val, db->membase + EMAC_RX_CTL_REG); - } - if (!rxcount) { db->emacrx_completed_flag = 1; reg_val = readl(db->membase + EMAC_INT_CTL_REG); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index d75d95a97dd9..993b2fb42961 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rxdescmem_busaddr = dma_res->start; } else { + ret = -ENODEV; goto err_free_netdev; } - if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) { dma_set_coherent_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)); - else if 
(!dma_set_mask(priv->device, DMA_BIT_MASK(32))) + } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) { dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); - else + } else { + ret = -EIO; goto err_free_netdev; + } /* MAC address space */ ret = request_and_map(pdev, "control_port", &control_port, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 30d24d19f40d..492ac383f16d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1508,9 +1508,6 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) - return -EINVAL; - mac_tscr = 0; switch (config.tx_type) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index 23b2d390fcdd..ace691d7cd75 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -40,10 +40,12 @@ #define AQ_DEVICE_ID_AQC113DEV 0x00C0 #define AQ_DEVICE_ID_AQC113CS 0x94C0 +#define AQ_DEVICE_ID_AQC113CA 0x34C0 #define AQ_DEVICE_ID_AQC114CS 0x93C0 #define AQ_DEVICE_ID_AQC113 0x04C0 #define AQ_DEVICE_ID_AQC113C 0x14C0 #define AQ_DEVICE_ID_AQC115C 0x12C0 +#define AQ_DEVICE_ID_AQC116C 0x11C0 #define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter" @@ -53,20 +55,19 @@ #define AQ_NIC_RATE_10G BIT(0) #define AQ_NIC_RATE_5G BIT(1) -#define AQ_NIC_RATE_5GSR BIT(2) -#define AQ_NIC_RATE_2G5 BIT(3) -#define AQ_NIC_RATE_1G BIT(4) -#define AQ_NIC_RATE_100M BIT(5) -#define AQ_NIC_RATE_10M BIT(6) -#define AQ_NIC_RATE_1G_HALF BIT(7) -#define AQ_NIC_RATE_100M_HALF BIT(8) -#define AQ_NIC_RATE_10M_HALF BIT(9) +#define AQ_NIC_RATE_2G5 BIT(2) +#define AQ_NIC_RATE_1G BIT(3) +#define AQ_NIC_RATE_100M BIT(4) +#define AQ_NIC_RATE_10M BIT(5) +#define AQ_NIC_RATE_1G_HALF BIT(6) +#define AQ_NIC_RATE_100M_HALF BIT(7) +#define AQ_NIC_RATE_10M_HALF BIT(8) -#define AQ_NIC_RATE_EEE_10G BIT(10) -#define AQ_NIC_RATE_EEE_5G BIT(11) -#define AQ_NIC_RATE_EEE_2G5 BIT(12) -#define AQ_NIC_RATE_EEE_1G BIT(13) -#define AQ_NIC_RATE_EEE_100M BIT(14) +#define AQ_NIC_RATE_EEE_10G BIT(9) +#define AQ_NIC_RATE_EEE_5G BIT(10) +#define AQ_NIC_RATE_EEE_2G5 BIT(11) +#define AQ_NIC_RATE_EEE_1G BIT(12) +#define AQ_NIC_RATE_EEE_100M BIT(13) #define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\ AQ_NIC_RATE_EEE_5G |\ AQ_NIC_RATE_EEE_2G5 |\ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 062a300a566a..dbd284660135 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -80,6 +80,8 @@ struct aq_hw_link_status_s { }; struct aq_stats_s { + u64 brc; + u64 btc; u64 uprc; u64 mprc; u64 bprc; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index e22935ce9573..e65ce7199dac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -231,9 +231,6 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, struct hwtstamp_config *config) { - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 
1acf544afeb4..33f1a1377588 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self) aq_macsec_init(self); #endif - mutex_lock(&self->fwreq_mutex); - err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr); - mutex_unlock(&self->fwreq_mutex); - if (err) - goto err_exit; + if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) { + // If DT has none or an invalid one, ask device for MAC address + mutex_lock(&self->fwreq_mutex); + err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr); + mutex_unlock(&self->fwreq_mutex); - eth_hw_addr_set(self->ndev, addr); + if (err) + goto err_exit; - if (!is_valid_ether_addr(self->ndev->dev_addr) || - !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) { - netdev_warn(self->ndev, "MAC is invalid, will use random."); - eth_hw_addr_random(self->ndev); + if (is_valid_ether_addr(addr) && + aq_nic_is_valid_ether_addr(addr)) { + eth_hw_addr_set(self->ndev, addr); + } else { + netdev_warn(self->ndev, "MAC is invalid, will use random."); + eth_hw_addr_random(self->ndev); + } } #if defined(AQ_CFG_MAC_ADDR_PERMANENT) @@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) data[++i] = stats->mbtc; data[++i] = stats->bbrc; data[++i] = stats->bbtc; - data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; - data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; + if (stats->brc) + data[++i] = stats->brc; + else + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; + if (stats->btc) + data[++i] = stats->btc; + else + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; data[++i] = stats->dma_pkt_rc; data[++i] = stats->dma_pkt_tc; data[++i] = stats->dma_oct_rc; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index d4b1976ee69b..797a95142d1f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = { { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), }, { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), }, { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), }, {} }; @@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = { { AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, { AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, { AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, - { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, + { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, }, + { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, + { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, }, + }; MODULE_DEVICE_TABLE(pci, aq_pci_tbl); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index d281322d7dd2..f4774cf051c9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u { unsigned int count; - WARN_ONCE(!aq_vec_is_valid_tc(self, tc), - "Invalid tc %u (#rx=%u, #tx=%u)\n", - tc, self->rx_rings, 
self->tx_rings); if (!aq_vec_is_valid_tc(self, tc)) return 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 3f1704cbe1cb..7e88d7234b14 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -867,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) int hw_atl_utils_update_stats(struct aq_hw_s *self) { struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; struct hw_atl_utils_mbox mbox; + bool corrupted_stats = false; hw_atl_utils_mpi_read_stats(self, &mbox); -#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \ - mbox.stats._N_ - self->last_stats._N_) +#define AQ_SDELTA(_N_) \ +do { \ + if (!corrupted_stats && \ + ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \ + curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \ + else \ + corrupted_stats = true; \ +} while (0) if (self->aq_link_status.mbps) { AQ_SDELTA(uprc); @@ -892,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self) AQ_SDELTA(bbrc); AQ_SDELTA(bbtc); AQ_SDELTA(dpc); + + if (!corrupted_stats) + *cs = curr_stats; } #undef AQ_SDELTA diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index eac631c45c56..4d4cfbc91e19 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) if (speed & AQ_NIC_RATE_5G) rate |= FW2X_RATE_5G; - if (speed & AQ_NIC_RATE_5GSR) - rate |= FW2X_RATE_5G; - if (speed & AQ_NIC_RATE_2G5) rate |= FW2X_RATE_2G5; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index c98708bb044c..5dfc751572ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = { AQ_NIC_RATE_5G | AQ_NIC_RATE_2G5 | AQ_NIC_RATE_1G | - AQ_NIC_RATE_1G_HALF | AQ_NIC_RATE_100M | - AQ_NIC_RATE_100M_HALF | - AQ_NIC_RATE_10M | - AQ_NIC_RATE_10M_HALF, + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc115c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc116c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, }; static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h index de8723f1c28a..346f0dc9912e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h @@ -9,6 +9,8 @@ #include "aq_common.h" extern const struct aq_hw_caps_s hw_atl2_caps_aqc113; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c; extern const struct aq_hw_ops hw_atl2_ops; #endif /* HW_ATL2_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h index b66fa346581c..6bad64c77b87 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h @@ -239,7 +239,8 @@ struct version_s { u8 minor; u16 build; } phy; - u32 rsvd; + u32 drv_iface_ver:4; + u32 rsvd:28; }; struct link_status_s { @@ -424,7 +425,7 @@ struct cable_diag_status_s { u16 rsvd2; }; -struct statistics_s { +struct statistics_a0_s { struct { u32 link_up; u32 link_down; @@ -457,6 +458,33 @@ struct statistics_s { u32 reserve_fw_gap; }; +struct __packed statistics_b0_s { + u64 rx_good_octets; + u64 rx_pause_frames; + u64 rx_good_frames; + u64 rx_errors; + u64 rx_unicast_frames; + u64 rx_multicast_frames; + u64 rx_broadcast_frames; + + u64 tx_good_octets; + u64 tx_pause_frames; + u64 tx_good_frames; + u64 tx_errors; + u64 tx_unicast_frames; + u64 tx_multicast_frames; + u64 tx_broadcast_frames; + + u32 main_loop_cycles; +}; + +struct __packed statistics_s { + union __packed { + struct statistics_a0_s a0; + struct statistics_b0_s b0; + }; +}; + struct filter_caps_s { u8 l2_filters_base_index:6; u8 flexible_filter_mask:2; @@ -545,7 +573,7 @@ struct management_status_s { u32 rsvd5; }; -struct fw_interface_out { +struct __packed fw_interface_out { struct transaction_counter_s transaction_id; struct version_s version; struct link_status_s link_status; @@ -569,7 +597,6 @@ struct fw_interface_out { struct core_dump_s core_dump; u32 rsvd11; struct statistics_s stats; - u32 rsvd12; struct filter_caps_s filter_caps; struct device_caps_s device_caps; u32 rsvd13; @@ -592,6 +619,9 @@ struct fw_interface_out { #define AQ_HOST_MODE_LOW_POWER 3U #define AQ_HOST_MODE_SHUTDOWN 4U +#define AQ_A2_FW_INTERFACE_A0 0 +#define AQ_A2_FW_INTERFACE_B0 1 + int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops); int hw_atl2_utils_soft_reset(struct aq_hw_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index dd259c8f2f4f..58d426dda3ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self, if (cnt > AQ_A2_FW_READ_TRY_MAX) return -ETIME; if (tid1.transaction_cnt_a != tid1.transaction_cnt_b) - udelay(1); + mdelay(1); } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b); hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords); @@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed, { link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G); link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G); - link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR); + link_options->rate_N5G = link_options->rate_5G; link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5); link_options->rate_N2P5G = link_options->rate_2P5G; link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G); @@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps) rate |= AQ_NIC_RATE_10G; if (lkp_link_caps->rate_5G) rate |= AQ_NIC_RATE_5G; - if (lkp_link_caps->rate_N5G) - rate |= AQ_NIC_RATE_5GSR; if (lkp_link_caps->rate_2P5G) rate |= AQ_NIC_RATE_2G5; if (lkp_link_caps->rate_1G) @@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac) return 0; } -static int aq_a2_fw_update_stats(struct aq_hw_s *self) +static void 
aq_a2_fill_a0_stats(struct aq_hw_s *self, + struct statistics_s *stats) { struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; - struct statistics_s stats; - - hw_atl2_shared_buffer_read_safe(self, stats, &stats); - -#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \ - stats.msm._F_ - priv->last_stats.msm._F_) + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \ + curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\ + else \ + corrupted_stats = true; \ +} while (0) if (self->aq_link_status.mbps) { AQ_SDELTA(uprc, rx_unicast_frames); @@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self) AQ_SDELTA(mbtc, tx_multicast_octets); AQ_SDELTA(bbrc, rx_broadcast_octets); AQ_SDELTA(bbtc, tx_broadcast_octets); + + if (!corrupted_stats) + *cs = curr_stats; } #undef AQ_SDELTA - self->curr_stats.dma_pkt_rc = - hw_atl_stats_rx_dma_good_pkt_counter_get(self); - self->curr_stats.dma_pkt_tc = - hw_atl_stats_tx_dma_good_pkt_counter_get(self); - self->curr_stats.dma_oct_rc = - hw_atl_stats_rx_dma_good_octet_counter_get(self); - self->curr_stats.dma_oct_tc = - hw_atl_stats_tx_dma_good_octet_counter_get(self); - self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); + +} + +static void aq_a2_fill_b0_stats(struct aq_hw_s *self, + struct statistics_s *stats) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \ + curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \ + else \ + corrupted_stats = true; \ +} while (0) + + if (self->aq_link_status.mbps) { + AQ_SDELTA(uprc, rx_unicast_frames); + AQ_SDELTA(mprc, rx_multicast_frames); + AQ_SDELTA(bprc, rx_broadcast_frames); + AQ_SDELTA(erpr, rx_errors); + AQ_SDELTA(brc, rx_good_octets); + + AQ_SDELTA(uptc, tx_unicast_frames); + AQ_SDELTA(mptc, tx_multicast_frames); + AQ_SDELTA(bptc, tx_broadcast_frames); + AQ_SDELTA(erpt, tx_errors); + AQ_SDELTA(btc, tx_good_octets); + + if (!corrupted_stats) + *cs = curr_stats; + } +#undef AQ_SDELTA +} + +static int aq_a2_fw_update_stats(struct aq_hw_s *self) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct statistics_s stats; + struct version_s version; + int err; + + err = hw_atl2_shared_buffer_read_safe(self, version, &version); + if (err) + return err; + + err = hw_atl2_shared_buffer_read_safe(self, stats, &stats); + if (err) + return err; + + if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0) + aq_a2_fill_a0_stats(self, &stats); + else + aq_a2_fill_b0_stats(self, &stats); + + cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self); + cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self); + cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self); + cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self); + cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); memcpy(&priv->last_stats, &stats, sizeof(stats)); @@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self) hw_atl2_shared_buffer_read_safe(self, version, &version); /* A2 FW version is stored in reverse order */ - return version.mac.major << 24 | - 
version.mac.minor << 16 | - version.mac.build; + return version.bundle.major << 24 | + version.bundle.minor << 16 | + version.bundle.build; } int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self, diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index ff924f06581e..270c2935591b 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1024,17 +1024,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode, ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); } -static void ag71xx_mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state) -{ - state->link = 0; -} - -static void ag71xx_mac_an_restart(struct phylink_config *config) -{ - /* Not Supported */ -} - static void ag71xx_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -1098,8 +1087,6 @@ static void ag71xx_mac_link_up(struct phylink_config *config, static const struct phylink_mac_ops ag71xx_phylink_mac_ops = { .validate = phylink_generic_validate, - .mac_pcs_get_state = ag71xx_mac_pcs_get_state, - .mac_an_restart = ag71xx_mac_an_restart, .mac_config = ag71xx_mac_config, .mac_link_down = ag71xx_mac_link_down, .mac_link_up = ag71xx_mac_link_up, diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 7cc5213c575a..b07cb9bc5f2d 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev) enet->irq_tx = platform_get_irq_byname(pdev, "tx"); - dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (err) + return err; err = bcm4908_enet_dma_alloc(enet); if (err) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index aec666e97683..651bc1d7a57a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -15356,11 +15356,6 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", config.tx_type, config.rx_filter); - if (config.flags) { - BNX2X_ERR("config.flags is reserved for future use\n"); - return -EINVAL; - } - bp->hwtstamp_ioctl_called = true; bp->tx_type = config.tx_type; bp->rx_filter = config.rx_filter; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 8388be119f9a..48520967746f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -417,9 +417,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) return -EFAULT; - if (stmpconf.flags) - return -EINVAL; - if (stmpconf.tx_type != HWTSTAMP_TX_ON && stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 283f3c1f1195..c28f8cc00d1c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -13806,9 +13806,6 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) return -EFAULT; - if (stmpconf.flags) - return -EINVAL; - if (stmpconf.tx_type != HWTSTAMP_TX_ON && 
stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 4b4ad731897f..8aca768571b2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -235,13 +235,18 @@ static int bnad_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - u32 supported, advertising; - - supported = SUPPORTED_10000baseT_Full; - advertising = ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full); cmd->base.autoneg = AUTONEG_DISABLE; - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; cmd->base.phy_address = 0; @@ -253,11 +258,6 @@ bnad_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - return 0; } diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 095c5a2144a7..fb6b27f46b15 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -464,10 +464,6 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd) sizeof(*tstamp_config))) return -EFAULT; - /* reserved for future extensions */ - if (tstamp_config->flags) - return -EINVAL; - switch (tstamp_config->tx_type) { case HWTSTAMP_TX_OFF: break; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 12eee2bc7f5c..ba28aa444e5a 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2114,9 +2114,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) return -EFAULT; - if (conf.flags) - return -EINVAL; - switch (conf.tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index c607756b731f..568f211d91cc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1254,9 +1254,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) return -EFAULT; - if (conf.flags) - return -EINVAL; - switch (conf.tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 4b4ffdd1044d..103591dcea1c 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -702,9 
+702,6 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, if (copy_from_user(&config, rq->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) /* reserved for future extensions */ - return -EINVAL; - /* Check the status of hardware for timestamps */ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* Get the current state of the PTP clock */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index bb45d5df2856..63191692f624 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1917,10 +1917,6 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c index 4bfb4d8508f5..eaad453d487e 100644 --- a/drivers/net/ethernet/engleder/tsnep_ptp.c +++ b/drivers/net/ethernet/engleder/tsnep_ptp.c @@ -31,9 +31,6 @@ int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 6451c8383639..8e643567abce 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4550,6 +4550,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) fsl_mc_portal_free(priv->mc_io); + destroy_workqueue(priv->dpaa2_ptp_wq); + dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); free_netdev(net_dev); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 7b4961daa254..ed7301b69169 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -377,6 +377,9 @@ struct bufdesc_ex { #define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \ + (((X) == 1) ?
FEC_ENET_RXF_1 : \ + FEC_ENET_RXF_2)) #define FEC_ENET_TS_AVAIL ((uint)0x00010000) #define FEC_ENET_TS_TIMER ((uint)0x00008000) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f38e70692514..796133de527e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1185,6 +1185,21 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) } } +static void fec_irqs_disable(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + writel(0, fep->hwp + FEC_IMASK); +} + +static void fec_irqs_disable_except_wakeup(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + writel(0, fep->hwp + FEC_IMASK); + writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); +} + static void fec_stop(struct net_device *ndev) { @@ -1211,15 +1226,13 @@ fec_stop(struct net_device *ndev) writel(1, fep->hwp + FEC_ECNTRL); udelay(10); } - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } else { - writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); val = readl(fep->hwp + FEC_ECNTRL); val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); writel(val, fep->hwp + FEC_ECNTRL); - fec_enet_stop_mode(fep, true); } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC && @@ -1480,7 +1493,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) break; pkt_received++; - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); + writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); /* Check for errors. */ status ^= BD_ENET_RX_LAST; @@ -2877,15 +2890,10 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) return -EINVAL; device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); - if (device_may_wakeup(&ndev->dev)) { + if (device_may_wakeup(&ndev->dev)) fep->wol_flag |= FEC_WOL_FLAG_ENABLE; - if (fep->wake_irq > 0) - enable_irq_wake(fep->wake_irq); - } else { + else fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); - if (fep->wake_irq > 0) - disable_irq_wake(fep->wake_irq); - } return 0; } @@ -4057,9 +4065,19 @@ static int __maybe_unused fec_suspend(struct device *dev) netif_device_detach(ndev); netif_tx_unlock_bh(ndev); fec_stop(ndev); - fec_enet_clk_enable(ndev, false); - if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) + if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { + fec_irqs_disable(ndev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); + } else { + fec_irqs_disable_except_wakeup(ndev); + if (fep->wake_irq > 0) { + disable_irq(fep->wake_irq); + enable_irq_wake(fep->wake_irq); + } + fec_enet_stop_mode(fep, true); + } + /* It's safe to disable clocks since interrupts are masked */ + fec_enet_clk_enable(ndev, false); } rtnl_unlock(); @@ -4097,6 +4115,10 @@ static int __maybe_unused fec_resume(struct device *dev) } if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { fec_enet_stop_mode(fep, false); + if (fep->wake_irq) { + disable_irq_wake(fep->wake_irq); + enable_irq(fep->wake_irq); + } val = readl(fep->hwp + FEC_ECNTRL); val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index d71eac7e1924..af99017a5453 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -473,10 +473,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) if (copy_from_user(&config, 
ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: fep->hwts_tx_en = 0; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index acab58fd3db3..206b7a35eaf5 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2076,10 +2076,6 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: priv->hwts_tx_en = 0; diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 88ca49cbc1e2..d57508bc4307 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1; } else { skb = napi_alloc_skb(napi, len); + + if (unlikely(!skb)) + return NULL; set_protocol = true; } __skb_put(skb, len); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 081295bff765..817e2e8a7287 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -1083,7 +1083,7 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, sprintf(result[j++], "%u", index); sprintf(result[j++], "%u", READ_ONCE(ring->page_pool->pages_state_hold_cnt)); - sprintf(result[j++], "%u", + sprintf(result[j++], "%d", atomic_read(&ring->page_pool->pages_state_release_cnt)); sprintf(result[j++], "%u", ring->page_pool->p.pool_size); sprintf(result[j++], "%u", ring->page_pool->p.order); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index bd8801065e02..83aa1450ab9f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -4,6 +4,8 @@ #ifndef __HNS3_DEBUGFS_H #define __HNS3_DEBUGFS_H +#include "hnae3.h" + #define HNS3_DBG_READ_LEN 65536 #define HNS3_DBG_READ_LEN_128KB 0x20000 #define HNS3_DBG_READ_LEN_1MB 0x100000 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 8c7707263f9d..babc5d7a3b52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1002,9 +1002,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, return false; if (ALIGN(len, dma_get_cache_alignment()) > space) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1021,9 +1019,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, return false; if (space < HNS3_MAX_SGL_SIZE) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1548,92 +1544,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb) return true; } -static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, - struct sk_buff *skb, struct hns3_desc *desc, - struct 
hns3_desc_cb *desc_cb) +struct hns3_desc_param { + u32 paylen_ol4cs; + u32 ol_type_vlan_len_msec; + u32 type_cs_vlan_tso; + u16 mss_hw_csum; + u16 inner_vtag; + u16 out_vtag; +}; + +static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) +{ + pa->paylen_ol4cs = skb->len; + pa->ol_type_vlan_len_msec = 0; + pa->type_cs_vlan_tso = 0; + pa->mss_hw_csum = 0; + pa->inner_vtag = 0; + pa->out_vtag = 0; +} + +static int hns3_handle_vlan_info(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_param *param) { - u32 ol_type_vlan_len_msec = 0; - u32 paylen_ol4cs = skb->len; - u32 type_cs_vlan_tso = 0; - u16 mss_hw_csum = 0; - u16 inner_vtag = 0; - u16 out_vtag = 0; int ret; ret = hns3_handle_vtags(ring, skb); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_vlan_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_vlan_err); return ret; } else if (ret == HNS3_INNER_VLAN_TAG) { - inner_vtag = skb_vlan_tag_get(skb); - inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->inner_vtag = skb_vlan_tag_get(skb); + param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); } else if (ret == HNS3_OUTER_VLAN_TAG) { - out_vtag = skb_vlan_tag_get(skb); - out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->out_vtag = skb_vlan_tag_get(skb); + param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, + hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1); } + return 0; +} - desc_cb->send_bytes = skb->len; +static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + struct hns3_desc_param *param) +{ + u8 ol4_proto, il4_proto; + int ret; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 ol4_proto, il4_proto; - - if (hns3_check_hw_tx_csum(skb)) { - /* set checksum start and offset, defined in 2 Bytes */ - hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, - skb_checksum_start_offset(skb) >> 1); - hns3_set_field(ol_type_vlan_len_msec, - HNS3_TXD_CSUM_OFFSET_S, - skb->csum_offset >> 1); - mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); - goto out_hw_tx_csum; - } + if (hns3_check_hw_tx_csum(skb)) { + /* set checksum start and offset, defined in 2 Bytes */ + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, + skb_checksum_start_offset(skb) >> 1); + hns3_set_field(param->ol_type_vlan_len_msec, + HNS3_TXD_CSUM_OFFSET_S, + skb->csum_offset >> 1); + param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); + return 0; + } - skb_reset_mac_len(skb); + skb_reset_mac_len(skb); - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l4_proto_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l4_proto_err); + return ret; + } - ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l2l3l4_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + &param->type_cs_vlan_tso, + &param->ol_type_vlan_len_msec); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l2l3l4_err); + return ret; + } + + ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum, + &param->type_cs_vlan_tso, &desc_cb->send_bytes); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_tso_err); + return ret; + } + return 0; +}
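[Note: the hns3_ring_stats_update() calls above replace open-coded u64_stats_update_begin()/counter++/u64_stats_update_end() sequences; the wrapper macro itself is added to hns3_enet.h further down in this series. A minimal sketch of how a counter bumped through that writer side is read back consistently — the reader helper below is hypothetical, not part of the patch:

static u64 hns3_ring_read_stat(struct hns3_enet_ring *ring)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		val = ring->stats.tx_more;	/* any counter updated via the macro */
	} while (u64_stats_fetch_retry(&ring->syncp, start));

	return val;
}

The seqcount in ring->syncp only does real work on 32-bit kernels, where a 64-bit counter cannot be loaded atomically; on 64-bit builds both sides compile down to plain accesses.]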
+ +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc_param param; + int ret; + + hns3_init_desc_data(skb, &param); + ret = hns3_handle_vlan_info(ring, skb, &param); + if (unlikely(ret < 0)) + return ret; + + desc_cb->send_bytes = skb->len; - ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, - &type_cs_vlan_tso, &desc_cb->send_bytes); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_tso_err++; - u64_stats_update_end(&ring->syncp); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param); + if (ret) return ret; - } } -out_hw_tx_csum: /* Set txbd */ desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs); - desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum); - desc->tx.vlan_tag = cpu_to_le16(inner_vtag); - desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); + cpu_to_le32(param.ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); + desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); + desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); + desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); return 0; } @@ -1713,9 +1739,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, } if (unlikely(dma_mapping_error(dev, dma))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1861,9 +1885,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, * recursion level of over HNS3_MAX_RECURSION_LEVEL. 
*/ if (bd_num == UINT_MAX) { - u64_stats_update_begin(&ring->syncp); - ring->stats.over_max_recursion++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, over_max_recursion); return -ENOMEM; } @@ -1872,16 +1894,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, */ if (skb->len > HNS3_MAX_TSO_SIZE || (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.hw_limitation++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, hw_limitation); return -ENOMEM; } if (__skb_linearize(skb)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1911,9 +1929,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, bd_num = hns3_tx_bd_count(skb->len); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_copy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_copy); } out: @@ -1933,9 +1949,7 @@ out: return bd_num; } - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_busy); return -EBUSY; } @@ -2020,9 +2034,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, ring->pending_buf += num; if (!doorbell) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_more++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_more); return; } @@ -2072,9 +2084,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, ret = skb_copy_bits(skb, 0, buf, size); if (unlikely(ret < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.copy_bits_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, copy_bits_err); return ret; } @@ -2097,9 +2107,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, dma_sync_single_for_device(ring_to_dev(ring), dma, size, DMA_TO_DEVICE); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_bounce++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_bounce); + return bd_num; } @@ -2129,9 +2138,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); if (unlikely(nents < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.skb2sgl_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, skb2sgl_err); return -ENOMEM; } @@ -2140,9 +2147,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, DMA_TO_DEVICE); if (unlikely(!sgt->nents)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.map_sg_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, map_sg_err); return -ENOMEM; } @@ -2154,10 +2159,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, for (i = 0; i < sgt->nents; i++) bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), sg_dma_len(sgt->sgl + i)); - - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_sgl++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_sgl); return bd_num; } @@ -2182,23 +2184,45 @@ out: return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); } +static int hns3_handle_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + int next_to_use_head) +{ + int ret; + + ret = hns3_fill_skb_desc(ring, skb, 
&ring->desc[ring->next_to_use], + desc_cb); + if (unlikely(ret < 0)) + goto fill_err; + + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. + */ + ret = hns3_handle_desc_filling(ring, skb); + if (likely(ret > 0)) + return ret; + +fill_err: + hns3_clear_desc(ring, next_to_use_head); + return ret; +} + netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct netdev_queue *dev_queue; - int pre_ntu, next_to_use_head; + int pre_ntu, ret; bool doorbell; - int ret; /* Hardware can only handle short frames above 32 bytes */ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return NETDEV_TX_OK; } @@ -2217,20 +2241,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) goto out_err_tx_ok; } - next_to_use_head = ring->next_to_use; - - ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], - desc_cb); - if (unlikely(ret < 0)) - goto fill_err; - - /* 'ret < 0' means filling error, 'ret == 0' means skb->len is - * zero, which is unlikely, and 'ret > 0' means how many tx desc - * need to be notified to the hw. - */ - ret = hns3_handle_desc_filling(ring, skb); + ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); if (unlikely(ret <= 0)) - goto fill_err; + goto out_err_tx_ok; pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : (ring->desc_num - 1); @@ -2252,9 +2265,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; -fill_err: - hns3_clear_desc(ring, next_to_use_head); - out_err_tx_ok: dev_kfree_skb_any(skb); hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); @@ -3522,17 +3532,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, for (i = 0; i < cleand_count; i++) { desc_cb = &ring->desc_cb[ring->next_to_use]; if (desc_cb->reuse_flag) { - u64_stats_update_begin(&ring->syncp); - ring->stats.reuse_pg_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, reuse_pg_cnt); hns3_reuse_buffer(ring, ring->next_to_use); } else { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); hns3_rl_err(ring_to_netdev(ring), "alloc rx buffer failed: %d\n", @@ -3544,9 +3550,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); - u64_stats_update_begin(&ring->syncp); - ring->stats.non_reuse_pg++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, non_reuse_pg); } ring_ptr_move_fw(ring, next_to_use); @@ -3573,9 +3577,7 @@ static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, void *frag = napi_alloc_frag(frag_size); if (unlikely(!frag)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, frag_alloc_err); hns3_rl_err(ring_to_netdev(ring), "failed to allocate rx frag\n"); @@ -3587,9 +3589,7 @@ static int hns3_handle_rx_copybreak(struct 
sk_buff *skb, int i, skb_add_rx_frag(skb, i, virt_to_page(frag), offset_in_page(frag), frag_size, frag_size); - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, frag_alloc); return 0; } @@ -3722,9 +3722,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring, hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) return false; - u64_stats_update_begin(&ring->syncp); - ring->stats.csum_complete++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, csum_complete); skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)csum); @@ -3798,9 +3796,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | BIT(HNS3_RXD_OL3E_B) | BIT(HNS3_RXD_OL4E_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l3l4_csum_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, l3l4_csum_err); return; } @@ -3891,10 +3887,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, skb = ring->skb; if (unlikely(!skb)) { hns3_rl_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -3925,9 +3918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, if (ring->page_pool) skb_mark_for_recycle(skb); - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, seg_pkt_cnt); ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); __skb_put(skb, ring->pull_len); @@ -4135,9 +4126,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info, ol_info, csum); if (unlikely(ret)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.rx_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, rx_err_cnt); return ret; } @@ -4353,87 +4342,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) return rx_pkt_total; } -static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) +static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node **head, + bool is_tx) { + u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX; + u32 field_value = is_tx ? 
HNAE3_RING_GL_TX : HNAE3_RING_GL_RX; + struct hnae3_ring_chain_node *cur_chain = *head; struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *cur_chain = head; struct hnae3_ring_chain_node *chain; - struct hns3_enet_ring *tx_ring; - struct hns3_enet_ring *rx_ring; - - tx_ring = tqp_vector->tx_group.ring; - if (tx_ring) { - cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); - - cur_chain->next = NULL; - - while (tx_ring->next) { - tx_ring = tx_ring->next; - - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), - GFP_KERNEL); - if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); - - cur_chain = chain; - } - } + struct hns3_enet_ring *ring; - rx_ring = tqp_vector->rx_group.ring; - if (!tx_ring && rx_ring) { - cur_chain->next = NULL; - cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; - rx_ring = rx_ring->next; + if (cur_chain) { + while (cur_chain->next) + cur_chain = cur_chain->next; } - while (rx_ring) { + while (ring) { chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = rx_ring->tqp->tqp_index; + return -ENOMEM; + if (cur_chain) + cur_chain->next = chain; + else + *head = chain; + chain->tqp_index = ring->tqp->tqp_index; hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + bit_value); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, field_value); cur_chain = chain; - rx_ring = rx_ring->next; + ring = ring->next; } return 0; +} + +static struct hnae3_ring_chain_node * +hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = NULL; + struct hnae3_ring_chain_node *chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, true)) + goto err_free_chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, false)) + goto err_free_chain; + + return cur_chain; err_free_chain: - cur_chain = head->next; while (cur_chain) { chain = cur_chain->next; devm_kfree(&pdev->dev, cur_chain); cur_chain = chain; } - head->next = NULL; - return -ENOMEM; + return NULL; } static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, @@ -4442,7 +4414,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, struct pci_dev *pdev = tqp_vector->handle->pdev; struct hnae3_ring_chain_node *chain_tmp, *chain; - chain = head->next; + chain = head; while (chain) { chain_tmp = chain->next; @@ -4557,7 +4529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) } for (i = 0; i < priv->vector_num; i++) { - struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_ring_chain_node *vector_ring_chain; tqp_vector = &priv->tqp_vector[i]; @@ 
-4567,15 +4539,16 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) tqp_vector->tx_group.total_packets = 0; tqp_vector->handle = h; - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) { + ret = -ENOMEM; goto map_ring_fail; + } ret = h->ae_algo->ops->map_ring_to_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); if (ret) goto map_ring_fail; @@ -4674,7 +4647,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { - struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_ring_chain_node *vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int i; @@ -4689,13 +4662,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) * chain between vector and ring, we should go on to deal with * the remaining options. */ - if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) dev_warn(priv->dev, "failed to get ring chain\n"); h->ae_algo->ops->unmap_ring_from_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); @@ -5347,9 +5321,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) if (!ring->desc_cb[ring->next_to_use].reuse_flag) { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); /* if alloc new buffer fail, exit directly * and reclear in up flow. 
*/ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 808405cc0280..a05a0c7423ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -10,6 +10,9 @@ #include "hnae3.h" +struct iphdr; +struct ipv6hdr; + enum hns3_nic_state { HNS3_NIC_STATE_TESTING, HNS3_NIC_STATE_RESETTING, @@ -660,6 +663,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hns3_buf_size(_ring) ((_ring)->buf_size) +#define hns3_ring_stats_update(ring, cnt) do { \ + typeof(ring) (tmp) = (ring); \ + u64_stats_update_begin(&(tmp)->syncp); \ + ((tmp)->stats.cnt)++; \ + u64_stats_update_end(&(tmp)->syncp); \ +} while (0) \ + static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) { #if (PAGE_SIZE < 8192) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 65168125c42e..c287be8bc48d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -77,6 +77,10 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { .cmd = HCLGE_OPC_DFX_TQP_REG } }, }; +/* make sure: len(name) + interval >= maxlen(item data) + 2, + * for example, name = "pkt_num"(len: 7), the prototype of item data is u32, + * and print as "%u"(maxlen: 10), so the interval should be at least 5. + */ static void hclge_dbg_fill_content(char *content, u16 len, const struct hclge_dbg_item *items, const char **result, u16 size) @@ -99,7 +103,7 @@ static void hclge_dbg_fill_content(char *content, u16 len, static char *hclge_dbg_get_func_id_str(char *buf, u8 id) { if (id) - sprintf(buf, "vf%u", id - 1); + sprintf(buf, "vf%u", id - 1U); else sprintf(buf, "pf"); @@ -778,7 +782,6 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) data_str = kcalloc(ARRAY_SIZE(tm_pg_items), HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL); - if (!data_str) return -ENOMEM; @@ -1764,7 +1767,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, - char *buf, int *len, int *pos) + char *buf, int len, int *pos) { #define HCLGE_CMD_DATA_NUM 6 @@ -1776,7 +1779,7 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, if (i == 0 && j == 0) continue; - *pos += scnprintf(buf + *pos, *len - *pos, + *pos += scnprintf(buf + *pos, len - *pos, "0x%04x | 0x%08x\n", offset, le32_to_cpu(desc[i].data[j])); @@ -1814,7 +1817,7 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len) if (ret) return ret; - hclge_ncl_config_data_print(desc, &index, buf, &len, &pos); + hclge_ncl_config_data_print(desc, &index, buf, len, &pos); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1815fcf168b0..1d1c4514aac2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1613,12 +1613,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) hdev->num_rx_desc = HCLGE_MIN_RX_DESC; } +static void hclge_init_tc_config(struct hclge_dev *hdev) +{ + unsigned int i; + + if (hdev->tc_max > HNAE3_MAX_TC || + hdev->tc_max < 1) { + dev_warn(&hdev->pdev->dev, "TC num = %u.\n", + hdev->tc_max); + hdev->tc_max = 1; + } + 
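[Note: hclge_init_tc_config() is being factored out of hclge_configure(); the matching removal follows below. The clamp just above guarantees tc_max ends up in [1, HNAE3_MAX_TC] before pfc_max and hw_tc_map are derived from it. The same guard, reduced to a standalone sketch — helper name hypothetical:

static u8 hclge_clamp_tc_max(struct device *dev, u8 tc_max)
{
	/* Out-of-range value read from firmware: warn, fall back to one TC. */
	if (tc_max > HNAE3_MAX_TC || tc_max < 1) {
		dev_warn(dev, "TC num = %u, forcing to 1\n", tc_max);
		return 1;
	}
	return tc_max;
}
]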
+ /* Dev does not support DCB */ + if (!hnae3_dev_dcb_supported(hdev)) { + hdev->tc_max = 1; + hdev->pfc_max = 0; + } else { + hdev->pfc_max = hdev->tc_max; + } + + hdev->tm_info.num_tc = 1; + + /* Currently not support uncontiuous tc */ + for (i = 0; i < hdev->tm_info.num_tc; i++) + hnae3_set_bit(hdev->hw_tc_map, i, 1); + + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; +} + static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; - unsigned int i; int node, ret; ret = hclge_get_cfg(hdev, &cfg); @@ -1662,29 +1689,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); - if ((hdev->tc_max > HNAE3_MAX_TC) || - (hdev->tc_max < 1)) { - dev_warn(&hdev->pdev->dev, "TC num = %u.\n", - hdev->tc_max); - hdev->tc_max = 1; - } - - /* Dev does not support DCB */ - if (!hnae3_dev_dcb_supported(hdev)) { - hdev->tc_max = 1; - hdev->pfc_max = 0; - } else { - hdev->pfc_max = hdev->tc_max; - } - - hdev->tm_info.num_tc = 1; - - /* Currently not support uncontiuous tc */ - for (i = 0; i < hdev->tm_info.num_tc; i++) - hnae3_set_bit(hdev->hw_tc_map, i, 1); - - hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; - + hclge_init_tc_config(hdev); hclge_init_kdump_kernel_config(hdev); /* Set the affinity based on numa node */ @@ -1885,7 +1890,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev) u16 i, num_vport; num_vport = hdev->num_req_vfs + 1; - for (i = 0; i < num_vport; i++) { + for (i = 0; i < num_vport; i++) { int ret; ret = hclge_map_tqp_to_vport(hdev, vport); @@ -6801,7 +6806,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, if (vf > hdev->num_req_vfs) { dev_err(&hdev->pdev->dev, "Error: vf id (%u) should be less than %u\n", - vf - 1, hdev->num_req_vfs); + vf - 1U, hdev->num_req_vfs); return -EINVAL; } @@ -6811,7 +6816,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, if (ring >= tqps) { dev_err(&hdev->pdev->dev, "Error: queue id (%u) > max tqp num (%u)\n", - ring, tqps - 1); + ring, tqps - 1U); return -EINVAL; } @@ -7172,6 +7177,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, } } +static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, + u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + } + + return NULL; +} + +static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } +} + static int hclge_get_fd_rule_info(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { @@ -7179,7 +7215,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, struct hclge_fd_rule *rule = NULL; struct hclge_dev *hdev = vport->back; struct ethtool_rx_flow_spec *fs; - struct hlist_node *node2; if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; @@ -7188,14 +7223,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, spin_lock_bh(&hdev->fd_rule_lock); - hlist_for_each_entry_safe(rule, node2, 
&hdev->fd_rule_list, rule_node) { - if (rule->location >= fs->location) - break; - } - - if (!rule || fs->location != rule->location) { + rule = hclge_get_fd_rule(hdev, fs->location); + if (!rule) { spin_unlock_bh(&hdev->fd_rule_lock); - return -ENOENT; } @@ -7233,16 +7263,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, hclge_fd_get_ext_info(fs, rule); - if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { - fs->ring_cookie = RX_CLS_FLOW_DISC; - } else { - u64 vf_id; - - fs->ring_cookie = rule->queue_id; - vf_id = rule->vf_id; - vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; - fs->ring_cookie |= vf_id; - } + hclge_fd_get_ring_cookie(fs, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -7979,16 +8000,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, - enum hnae3_loop loop_mode) +static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) { -#define HCLGE_COMMON_LB_RETRY_MS 10 -#define HCLGE_COMMON_LB_RETRY_NUM 100 - struct hclge_common_lb_cmd *req; struct hclge_desc desc; - int ret, i = 0; u8 loop_mode_b; + int ret; req = (struct hclge_common_lb_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); @@ -8005,23 +8023,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, break; default: dev_err(&hdev->pdev->dev, - "unsupported common loopback mode %d\n", loop_mode); + "unsupported loopback mode %d\n", loop_mode); return -ENOTSUPP; } - if (en) { + req->mask = loop_mode_b; + if (en) req->enable = loop_mode_b; - req->mask = loop_mode_b; - } else { - req->mask = loop_mode_b; - } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, - "common loopback set fail, ret = %d\n", ret); - return ret; - } + "failed to send loopback cmd, loop_mode = %d, ret = %d\n", + loop_mode, ret); + + return ret; +} + +static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) +{ +#define HCLGE_COMMON_LB_RETRY_MS 10 +#define HCLGE_COMMON_LB_RETRY_NUM 100 + + struct hclge_common_lb_cmd *req; + struct hclge_desc desc; + u32 i = 0; + int ret; + + req = (struct hclge_common_lb_cmd *)desc.data; do { msleep(HCLGE_COMMON_LB_RETRY_MS); @@ -8030,20 +8059,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "common loopback get, ret = %d\n", ret); + "failed to get loopback done status, ret = %d\n", + ret); return ret; } } while (++i < HCLGE_COMMON_LB_RETRY_NUM && !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { - dev_err(&hdev->pdev->dev, "common loopback set timeout\n"); + dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); return -EBUSY; } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { - dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n"); + dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); return -EIO; } - return ret; + + return 0; +} + +static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + int ret; + + ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); + if (ret) + return ret; + + return hclge_cfg_common_loopback_wait(hdev); } static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, @@ -9703,8 +9746,8 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, ret = 
hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, - "failed to get vlan filter config, ret = %d.\n", ret); + dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); return ret; } @@ -9715,8 +9758,8 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) - dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n", - ret); + dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", + vf_id, ret); return ret; } @@ -9962,6 +10005,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, return ret; } +static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, + u16 vlan_id, bool is_kill) +{ + /* vlan 0 may be added twice when 8021q module is enabled */ + if (!is_kill && !vlan_id && + test_bit(vport_id, hdev->vlan_table[vlan_id])) + return false; + + if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Add port vlan failed, vport %u is already in vlan %u\n", + vport_id, vlan_id); + return false; + } + + if (is_kill && + !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Delete port vlan failed, vport %u is not in vlan %u\n", + vport_id, vlan_id); + return false; + } + + return true; +} + static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, u16 vport_id, u16 vlan_id, bool is_kill) @@ -9983,26 +10052,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return ret; } - /* vlan 0 may be added twice when 8021q module is enabled */ - if (!is_kill && !vlan_id && - test_bit(vport_id, hdev->vlan_table[vlan_id])) + if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) return 0; - if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { - dev_err(&hdev->pdev->dev, - "Add port vlan failed, vport %u is already in vlan %u\n", - vport_id, vlan_id); - return -EINVAL; - } - - if (is_kill && - !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { - dev_err(&hdev->pdev->dev, - "Delete port vlan failed, vport %u is not in vlan %u\n", - vport_id, vlan_id); - return -EINVAL; - } - for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) vport_num++; @@ -10194,67 +10246,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) return status; } -static int hclge_init_vlan_config(struct hclge_dev *hdev) +static int hclge_init_vlan_filter(struct hclge_dev *hdev) { -#define HCLGE_DEF_VLAN_TYPE 0x8100 - - struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - /* for revision 0x21, vf vlan filter is per function */ - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - ret = hclge_set_vlan_filter_ctrl(hdev, - HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, - true, - vport->vport_id); - if (ret) - return ret; - vport->cur_vlan_fltr_en = true; - } + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true, 0); - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true, - 0); - if (ret) - return ret; - } else { + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; ret = 
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS_V1_B, - true, 0); + HCLGE_FILTER_FE_EGRESS, true, + vport->vport_id); if (ret) return ret; + vport->cur_vlan_fltr_en = true; } - hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true, 0); +} - ret = hclge_set_vlan_protocol_type(hdev); - if (ret) - return ret; +static int hclge_init_vlan_type(struct hclge_dev *hdev) +{ + hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; - for (i = 0; i < hdev->num_alloc_vport; i++) { - u16 vlan_tag; - u8 qos; + return hclge_set_vlan_protocol_type(hdev); +} + +static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) +{ + struct hclge_port_base_vlan_config *cfg; + struct hclge_vport *vport; + int ret; + int i; + for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - qos = vport->port_base_vlan_cfg.vlan_info.qos; + cfg = &vport->port_base_vlan_cfg; - ret = hclge_vlan_offload_cfg(vport, - vport->port_base_vlan_cfg.state, - vlan_tag, qos); + ret = hclge_vlan_offload_cfg(vport, cfg->state, + cfg->vlan_info.vlan_tag, + cfg->vlan_info.qos); if (ret) return ret; } + return 0; +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + int ret; + + ret = hclge_init_vlan_filter(hdev); + if (ret) + return ret; + + ret = hclge_init_vlan_type(hdev); + if (ret) + return ret; + + ret = hclge_init_vport_vlan_offload(hdev); + if (ret) + return ret; return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } @@ -10511,12 +10576,41 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, return false; } +static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), + vport->vport_id, new_info->vlan_tag, + false); + if (ret) + return ret; + + /* remove old VLAN tag */ + if (old_info->vlan_tag == 0) + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, + true, 0); + else + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + old_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vport%u port base vlan %u, ret = %d.\n", + vport->vport_id, old_info->vlan_tag, ret); + + return ret; +} + int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info) { struct hnae3_handle *nic = &vport->nic; struct hclge_vlan_info *old_vlan_info; - struct hclge_dev *hdev = vport->back; int ret; old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; @@ -10529,38 +10623,12 @@ int hclge_update_port_base_vlan_cfg(struct 
hclge_vport *vport, u16 state, if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) goto out; - if (state == HNAE3_PORT_BASE_VLAN_MODIFY) { - /* add new VLAN tag */ - ret = hclge_set_vlan_filter_hw(hdev, - htons(vlan_info->vlan_proto), - vport->vport_id, - vlan_info->vlan_tag, - false); - if (ret) - return ret; - - /* remove old VLAN tag */ - if (old_vlan_info->vlan_tag == 0) - ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, - true, 0); - else - ret = hclge_set_vlan_filter_hw(hdev, - htons(ETH_P_8021Q), - vport->vport_id, - old_vlan_info->vlan_tag, - true); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to clear vport%u port base vlan %u, ret = %d.\n", - vport->vport_id, old_vlan_info->vlan_tag, ret); - return ret; - } - - goto out; - } - - ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, - old_vlan_info); + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) + ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, + old_vlan_info); + else + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); if (ret) return ret; @@ -12310,19 +12378,42 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, *max_rss_size = hdev->pf_rss_size_max; } +static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + struct hclge_dev *hdev = vport->back; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 roundup_size; + unsigned int i; + + roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); + roundup_size = ilog2(roundup_size); + /* Set the RSS TC mode according to the new RSS size */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = vport->nic.kinfo.rss_size * i; + } + + return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); +} + static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, bool rxfh_configured) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hclge_vport *vport = hclge_get_vport(handle); struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; - u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; struct hclge_dev *hdev = vport->back; - u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; u16 cur_rss_size = kinfo->rss_size; u16 cur_tqps = kinfo->num_tqps; - u16 tc_valid[HCLGE_MAX_TC_NUM]; - u16 roundup_size; u32 *rss_indir; unsigned int i; int ret; @@ -12335,20 +12426,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, return ret; } - roundup_size = roundup_pow_of_two(kinfo->rss_size); - roundup_size = ilog2(roundup_size); - /* Set the RSS TC mode according to the new RSS size */ - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - tc_valid[i] = 0; - - if (!(hdev->hw_tc_map & BIT(i))) - continue; - - tc_valid[i] = 1; - tc_size[i] = roundup_size; - tc_offset[i] = kinfo->rss_size * i; - } - ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + ret = hclge_set_rss_tc_mode_cfg(handle); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index c495df2e5953..8b3954b39147 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -181,7 +181,7 @@ static int hclge_get_ring_chain_from_mbx( if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { 
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n", req->msg.param[i].tqp_index, - vport->nic.kinfo.rss_size - 1); + vport->nic.kinfo.rss_size - 1U); return -EINVAL; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index fd0e20190b90..4200d0b6d931 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -4,6 +4,10 @@ #ifndef __HCLGE_MDIO_H #define __HCLGE_MDIO_H +#include "hnae3.h" + +struct hclge_dev; + int hclge_mac_mdio_config(struct hclge_dev *hdev); int hclge_mac_connect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index 7a9b77de632a..bbee74cd8404 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -8,6 +8,9 @@ #include <linux/net_tstamp.h> #include <linux/types.h> +struct hclge_dev; +struct ifreq; + #define HCLGE_PTP_REG_OFFSET 0x29000 #define HCLGE_PTP_TX_TS_SEQID_REG 0x0 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 1db7f40b4525..619cc30a2dfc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -6,6 +6,12 @@ #include <linux/types.h> +#include "hnae3.h" + +struct hclge_dev; +struct hclge_vport; +enum hclge_opcode_type; + /* MAC Pause */ #define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0) #define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 06586173add7..998717f02136 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -814,7 +814,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, { struct hinic_hwif *hwif = attr->hwif; struct pci_dev *pdev = hwif->pdev; - size_t cell_ctxt_size; chain->hwif = hwif; chain->chain_type = attr->chain_type; @@ -826,8 +825,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, sema_init(&chain->sem, 1); - cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); - chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); + chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells, + sizeof(*chain->cell_ctxt), GFP_KERNEL); if (!chain->cell_ctxt) return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 307a6d4af993..a627237f694b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -796,11 +796,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, struct hinic_cmdq_ctxt *cmdq_ctxts; struct pci_dev *pdev = hwif->pdev; struct hinic_pfhwdev *pfhwdev; - size_t cmdq_ctxts_size; int err; - cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts); - cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL); + cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, + sizeof(*cmdq_ctxts), GFP_KERNEL); if (!cmdq_ctxts) return -ENOMEM; @@ -884,7 +883,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); struct 
pci_dev *pdev = hwif->pdev; struct hinic_hwdev *hwdev; - size_t saved_wqs_size; u16 max_wqe_size; int err; @@ -895,8 +893,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, if (!cmdqs->cmdq_buf_pool) return -ENOMEM; - saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); - cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL); + cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, + sizeof(*cmdqs->saved_wqs), GFP_KERNEL); if (!cmdqs->saved_wqs) { err = -ENOMEM; goto err_saved_wqs; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 657a15447bd0..2127a48749a8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -162,7 +162,6 @@ static int init_msix(struct hinic_hwdev *hwdev) struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; int nr_irqs, num_aeqs, num_ceqs; - size_t msix_entries_size; int i, err; num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); @@ -171,8 +170,8 @@ static int init_msix(struct hinic_hwdev *hwdev) if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); - msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries); - hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size, + hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs, + sizeof(*hwdev->msix_entries), GFP_KERNEL); if (!hwdev->msix_entries) return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index d3fc05a07fdb..045c47786a04 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq) struct hinic_hwif *hwif = eq->hwif; struct pci_dev *pdev = hwif->pdev; u32 init_val, addr, val; - size_t addr_size; int err, pg; - addr_size = eq->num_pages * sizeof(*eq->dma_addr); - eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages, + sizeof(*eq->dma_addr), GFP_KERNEL); if (!eq->dma_addr) return -ENOMEM; - addr_size = eq->num_pages * sizeof(*eq->virt_addr); - eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages, + sizeof(*eq->virt_addr), GFP_KERNEL); if (!eq->virt_addr) { err = -ENOMEM; goto err_virt_addr_alloc; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index a6e43d686293..c4a0ba6e183a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -375,31 +375,30 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; - size_t qps_size, wq_size, db_size; void *ci_addr_base; int i, j, err; - qps_size = num_qps * sizeof(*func_to_io->qps); - func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL); + func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->qps), GFP_KERNEL); if (!func_to_io->qps) return -ENOMEM; - wq_size = num_qps * sizeof(*func_to_io->sq_wq); - func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->sq_wq), GFP_KERNEL); if (!func_to_io->sq_wq) { err = -ENOMEM; goto err_sq_wq; } - wq_size = num_qps * sizeof(*func_to_io->rq_wq); 
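[Note: the hinic changes in this stretch all apply one pattern — an open-coded `count * sizeof(elem)` passed to devm_kzalloc() becomes devm_kcalloc(), which returns NULL when the multiplication would overflow instead of silently allocating a short buffer, and still zeroes the memory. A minimal sketch of the idiom, with a hypothetical element type:

/* Device-managed, zeroed array of n u32 entries; devm_kcalloc()
 * rejects overflow of n * sizeof(u32) by returning NULL.
 */
static u32 *alloc_entry_array(struct device *dev, size_t n)
{
	return devm_kcalloc(dev, n, sizeof(u32), GFP_KERNEL);
}
]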
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->rq_wq), GFP_KERNEL); if (!func_to_io->rq_wq) { err = -ENOMEM; goto err_rq_wq; } - db_size = num_qps * sizeof(*func_to_io->sq_db); - func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL); + func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->sq_db), GFP_KERNEL); if (!func_to_io->sq_db) { err = -ENOMEM; goto err_sq_db; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 7f0f1aa3cedd..2d9b06d7caad 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -193,20 +193,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; - size_t size; - size = wqs->num_pages * sizeof(*wqs->page_paddr); - wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->page_paddr), GFP_KERNEL); if (!wqs->page_paddr) return -ENOMEM; - size = wqs->num_pages * sizeof(*wqs->page_vaddr); - wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->page_vaddr), GFP_KERNEL); if (!wqs->page_vaddr) goto err_page_vaddr; - size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); - wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->shadow_page_vaddr), + GFP_KERNEL); if (!wqs->shadow_page_vaddr) goto err_page_shadow_vaddr; @@ -379,15 +379,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq) { struct hinic_hwif *hwif = wq->hwif; struct pci_dev *pdev = hwif->pdev; - size_t size; - size = wq->num_q_pages * wq->max_wqe_size; - wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages, + wq->max_wqe_size, GFP_KERNEL); if (!wq->shadow_wqe) return -ENOMEM; - size = wq->num_q_pages * sizeof(wq->prod_idx); - wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages, + sizeof(wq->prod_idx), GFP_KERNEL); if (!wq->shadow_idx) goto err_shadow_idx; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index f9a766b8ac43..1e1b1be86174 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -144,13 +144,12 @@ static int create_txqs(struct hinic_dev *nic_dev) { int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t txq_size; if (nic_dev->txqs) return -EINVAL; - txq_size = num_txqs * sizeof(*nic_dev->txqs); - nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); + nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs, + sizeof(*nic_dev->txqs), GFP_KERNEL); if (!nic_dev->txqs) return -ENOMEM; @@ -241,13 +240,12 @@ static int create_rxqs(struct hinic_dev *nic_dev) { int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t rxq_size; if (nic_dev->rxqs) return -EINVAL; - rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); - nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL); + nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs, + 
sizeof(*nic_dev->rxqs), GFP_KERNEL); if (!nic_dev->rxqs) return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index a78c398bf5b2..01e7d3c0b68e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> +#include <linux/module.h> #include "hinic_hw_dev.h" #include "hinic_dev.h" diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index c5bdb0d374ef..a984a7a6dd2e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -862,7 +862,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; int err, irqname_len; - size_t sges_size; txq->netdev = netdev; txq->sq = sq; @@ -871,13 +870,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, txq->max_sges = HINIC_MAX_SQ_BUFDESCS; - sges_size = txq->max_sges * sizeof(*txq->sges); - txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges, + sizeof(*txq->sges), GFP_KERNEL); if (!txq->sges) return -ENOMEM; - sges_size = txq->max_sges * sizeof(*txq->free_sges); - txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges, + sizeof(*txq->free_sges), GFP_KERNEL); if (!txq->free_sges) { err = -ENOMEM; goto err_alloc_free_sges; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ad9a2819e202..59536bd5cab1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -308,7 +308,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, if (adapter->fw_done_rc) { dev_err(dev, "Couldn't map LTB, rc = %d\n", adapter->fw_done_rc); - rc = -1; + rc = -EIO; goto out; } rc = 0; @@ -540,13 +540,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; dma_addr_t stok; + int rc; stok = dma_map_single(dev, &adapter->stats, sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); - if (dma_mapping_error(dev, stok)) { - dev_err(dev, "Couldn't map stats buffer\n"); - return -1; + rc = dma_mapping_error(dev, stok); + if (rc) { + dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc); + return rc; } adapter->stats_token = stok; @@ -628,17 +630,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) old_buff_size = adapter->prev_rx_buf_sz; new_buff_size = adapter->cur_rx_buf_sz; - /* Require buff size to be exactly same for now */ - if (old_buff_size != new_buff_size) - return false; - - if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) - return true; - - if (old_num_pools < adapter->min_rx_queues || - old_num_pools > adapter->max_rx_queues || - old_pool_size < adapter->min_rx_add_entries_per_subcrq || - old_pool_size > adapter->max_rx_add_entries_per_subcrq) + if (old_buff_size != new_buff_size || + old_num_pools != new_num_pools || + old_pool_size != new_pool_size) return false; return true; @@ -663,7 +657,7 @@ static int init_rx_pools(struct net_device *netdev) u64 num_pools; u64 pool_size; /* # of buffers in one pool */ u64 buff_size; - int i, j; + int i, j, rc; pool_size = adapter->req_rx_add_entries_per_subcrq; num_pools = 
adapter->req_rx_queues; @@ -682,7 +676,7 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!adapter->rx_pool) { dev_err(dev, "Failed to allocate rx pools\n"); - return -1; + return -ENOMEM; } /* Set num_active_rx_pools early. If we fail below after partial @@ -705,6 +699,7 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!rx_pool->free_map) { dev_err(dev, "Couldn't alloc free_map %d\n", i); + rc = -ENOMEM; goto out_release; } @@ -713,6 +708,7 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!rx_pool->rx_buff) { dev_err(dev, "Couldn't alloc rx buffers\n"); + rc = -ENOMEM; goto out_release; } } @@ -726,8 +722,9 @@ update_ltb: dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", i, rx_pool->size, rx_pool->buff_size); - if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * rx_pool->buff_size)) + rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, + rx_pool->size * rx_pool->buff_size); + if (rc) goto out; for (j = 0; j < rx_pool->size; ++j) { @@ -764,7 +761,7 @@ out: /* We failed to allocate one or more LTBs or map them on the VIOS. * Hold onto the pools and any LTBs that we did allocate/map. */ - return -1; + return rc; } static void release_vpd_data(struct ibmvnic_adapter *adapter) @@ -825,13 +822,13 @@ static int init_one_tx_pool(struct net_device *netdev, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); if (!tx_pool->tx_buff) - return -1; + return -ENOMEM; tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); if (!tx_pool->free_map) { kfree(tx_pool->tx_buff); tx_pool->tx_buff = NULL; - return -1; + return -ENOMEM; } for (i = 0; i < pool_size; i++) @@ -874,17 +871,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) old_mtu = adapter->prev_mtu; new_mtu = adapter->req_mtu; - /* Require MTU to be exactly same to reuse pools for now */ - if (old_mtu != new_mtu) - return false; - - if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) - return true; - - if (old_num_pools < adapter->min_tx_queues || - old_num_pools > adapter->max_tx_queues || - old_pool_size < adapter->min_tx_entries_per_subcrq || - old_pool_size > adapter->max_tx_entries_per_subcrq) + if (old_mtu != new_mtu || + old_num_pools != new_num_pools || + old_pool_size != new_pool_size) return false; return true; @@ -930,7 +919,7 @@ static int init_tx_pools(struct net_device *netdev) adapter->tx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) - return -1; + return -ENOMEM; adapter->tso_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); @@ -940,7 +929,7 @@ static int init_tx_pools(struct net_device *netdev) if (!adapter->tso_pool) { kfree(adapter->tx_pool); adapter->tx_pool = NULL; - return -1; + return -ENOMEM; } /* Set num_active_tx_pools early. 
If we fail below after partial @@ -1129,7 +1118,7 @@ static int ibmvnic_login(struct net_device *netdev) retry = false; if (retry_count > retries) { netdev_warn(netdev, "Login attempts exceeded\n"); - return -1; + return -EACCES; } adapter->init_done_rc = 0; @@ -1170,25 +1159,26 @@ static int ibmvnic_login(struct net_device *netdev) timeout)) { netdev_warn(netdev, "Capabilities query timed out\n"); - return -1; + return -ETIMEDOUT; } rc = init_sub_crqs(adapter); if (rc) { netdev_warn(netdev, "SCRQ initialization failed\n"); - return -1; + return rc; } rc = init_sub_crq_irqs(adapter); if (rc) { netdev_warn(netdev, "SCRQ irq initialization failed\n"); - return -1; + return rc; } } else if (adapter->init_done_rc) { - netdev_warn(netdev, "Adapter login failed\n"); - return -1; + netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n", + adapter->init_done_rc); + return -EIO; } } while (retry); @@ -1247,7 +1237,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { netdev_err(netdev, "timeout setting link state\n"); - return -1; + return -ETIMEDOUT; } if (adapter->init_done_rc == PARTIALSUCCESS) { @@ -2304,7 +2294,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, /* If someone else changed the adapter state * when we dropped the rtnl, fail the reset */ - rc = -1; + rc = -EAGAIN; goto out; } adapter->state = VNIC_CLOSED; @@ -2346,10 +2336,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, } rc = ibmvnic_reset_init(adapter, true); - if (rc) { - rc = IBMVNIC_INIT_FAILED; + if (rc) goto out; - } /* If the adapter was in PROBE or DOWN state prior to the reset, * exit here. @@ -3779,7 +3767,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); if (!allqueues) - return -1; + return -ENOMEM; for (i = 0; i < total_queues; i++) { allqueues[i] = init_sub_crq_queue(adapter); @@ -3848,7 +3836,7 @@ tx_failed: for (i = 0; i < registered_queues; i++) release_sub_crq_queue(adapter, allqueues[i], 1); kfree(allqueues); - return -1; + return -ENOMEM; } static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) @@ -4207,7 +4195,7 @@ static int send_login(struct ibmvnic_adapter *adapter) if (!adapter->tx_scrq || !adapter->rx_scrq) { netdev_err(adapter->netdev, "RX or TX queues are not allocated, device login failed\n"); - return -1; + return -ENOMEM; } release_login_buffer(adapter); @@ -4327,7 +4315,7 @@ buf_map_failed: kfree(login_buffer); adapter->login_buf = NULL; buf_alloc_failed: - return -1; + return -ENOMEM; } static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, @@ -5648,7 +5636,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Initialization sequence timed out\n"); - return -1; + return -ETIMEDOUT; } if (adapter->init_done_rc) { @@ -5659,7 +5647,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) if (adapter->from_passive_init) { adapter->state = VNIC_OPEN; adapter->from_passive_init = false; - return -1; + return -EINVAL; } if (reset && diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index b8e42f67d897..4a8f36e0ab07 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -18,8 +18,6 @@ #define IBMVNIC_NAME "ibmvnic" #define IBMVNIC_DRIVER_VERSION "1.0.1" #define 
IBMVNIC_INVALID_MAP -1 -#define IBMVNIC_STATS_TIMEOUT 1 -#define IBMVNIC_INIT_FAILED 2 #define IBMVNIC_OPEN_FAILED 3 /* basic structures plus 100 2k buffers */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 44e2dc8328a2..635a95927e93 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3614,10 +3614,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return -EINVAL; - /* flags reserved for future extensions - must be zero */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 291e61ac3e44..2c1b1da1220e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } + if (vsi->type != I40E_VSI_MAIN && + vsi->type != I40E_VSI_FDIR && + vsi->type != I40E_VSI_VMDQ2) { + dev_info(&pf->pdev->dev, + "vsi %d type %d descriptor rings not available\n", + vsi_seid, vsi->type); + return; + } if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) { dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 09b1d5aed1c9..61e5789d78db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1205,10 +1205,6 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work); - /* Reserved for future extensions. */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: pf->ptp_tx = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 80ae264c99ba..2ea4deb8fc44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1949,6 +1949,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, } /** + * i40e_sync_vf_state + * @vf: pointer to the VF info + * @state: VF state + * + * Called from a VF message to synchronize the service with a potential + * VF reset state + **/ +static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) +{ + int i; + + /* When handling some messages, it needs VF state to be set. + * It is possible that this flag is cleared during VF reset, + * so there is a need to wait until the end of the reset to + * handle the request message correctly. 
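The i40e_sync_vf_state() helper introduced here polls a VF state bit up to I40E_VF_STATE_WAIT_COUNT times with a sleep between checks, so a message that races with a VF reset is only rejected once the state has definitively not come back. A standalone sketch of that bounded-wait shape, assuming a userspace atomic flag and nanosleep() in place of the vf_states bitmap and usleep_range():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <time.h>

    #define STATE_WAIT_COUNT 20

    static atomic_bool state_active;

    /* Poll a flag that another context may set while a reset completes;
     * give up after a bounded number of sleeps, then report the final
     * state, as i40e_sync_vf_state() does with test_bit().
     */
    static bool sync_state(void)
    {
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
        int i;

        for (i = 0; i < STATE_WAIT_COUNT; i++) {
            if (atomic_load(&state_active))
                return true;
            nanosleep(&delay, NULL);
        }
        return atomic_load(&state_active);
    }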
+ */ + for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { + if (test_bit(state, &vf->vf_states)) + return true; + usleep_range(10000, 20000); + } + + return test_bit(state, &vf->vf_states); +} + +/** * i40e_vc_get_version_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2008,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) size_t len = 0; int ret; - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2131,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) bool allmulti = false; bool alluni = false; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -2219,7 +2245,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_vsi *vsi; u16 num_qps_all = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2368,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2540,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2590,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) u8 cur_pairs = vf->num_queue_pairs; struct i40e_pf *pf = vf->pf; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) return -EINVAL; if (req_pairs > I40E_MAX_VF_QUEUES) { @@ -2635,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) memset(&stats, 0, sizeof(struct i40e_eth_stats)); - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2752,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2824,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2968,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -3088,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) struct i40e_vsi *vsi = NULL; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || - 
(vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { + vrk->key_len != I40E_HKEY_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3119,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u16 i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || - (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { + vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3154,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int len = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3190,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3215,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3241,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3468,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3599,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -3708,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u64 speed = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3797,11 +3823,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) /* set this flag only after making sure all inputs are sane */ vf->adq_enabled = true; - /* num_req_queues is set when user changes number of queues via ethtool - * and this causes issue for default VSI(which depends on this variable) - * when ADq is enabled, hence reset it. 
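Nearby, i40e_vc_config_rss_key() and i40e_vc_config_rss_lut() keep their strict length checks (key_len must equal I40E_HKEY_ARRAY_SIZE, lut_entries must equal I40E_VF_HLUT_ARRAY_SIZE) while dropping the redundant parentheses. A sketch of that validate-before-copy pattern; the 52-byte size here is illustrative, not taken from this diff:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    #define HKEY_ARRAY_SIZE 52    /* stands in for I40E_HKEY_ARRAY_SIZE */

    /* Reject a caller-supplied key whose length does not match the
     * hardware array exactly, before any bytes are copied or programmed.
     */
    static int config_rss_key(uint8_t *dst, const uint8_t *key, uint16_t key_len)
    {
        if (!key || key_len != HKEY_ARRAY_SIZE)
            return -EINVAL;
        memcpy(dst, key, HKEY_ARRAY_SIZE);
        return 0;
    }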
- */ - vf->num_req_queues = 0; /* reset the VF in order to allocate resources */ i40e_vc_reset_vf(vf, true); @@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 091e32c1bb46..49575a640a84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -18,6 +18,8 @@ #define I40E_MAX_VF_PROMISC_FLAGS 3 +#define I40E_VF_STATE_WAIT_COUNT 20 + /* Various queue ctrls */ enum i40e_queue_ctrl { I40E_QUEUE_CTRL_UNKNOWN = 0, diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 3789269ce741..b5728bdbcf33 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -137,9 +137,13 @@ struct iavf_q_vector { struct iavf_mac_filter { struct list_head list; u8 macaddr[ETH_ALEN]; - bool is_new_mac; /* filter is new, wait for PF decision */ - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ + struct { + u8 is_new_mac:1; /* filter is new, wait for PF decision */ + u8 remove:1; /* filter needs to be removed */ + u8 add:1; /* filter needs to be added */ + u8 is_primary:1; /* filter is a default VF MAC */ + u8 padding:4; + }; }; struct iavf_vlan_filter { diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 27c7b36427d2..3bb56714beb0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -331,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev, **/ static int iavf_get_sset_count(struct net_device *netdev, int sset) { + /* Report the maximum number queues, even if not every queue is + * currently configured. Since allocation of queues is in pairs, + * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set + * at device creation and never changes. + */ + if (sset == ETH_SS_STATS) return IAVF_STATS_LEN + - (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES); + (IAVF_QUEUE_STATS_LEN * 2 * + netdev->real_num_tx_queues); else if (sset == ETH_SS_PRIV_FLAGS) return IAVF_PRIV_FLAGS_STR_LEN; else @@ -360,17 +367,18 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); rcu_read_lock(); - for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) { + /* As num_active_queues describe both tx and rx queues, we can use + * it to iterate over rings' stats. + */ + for (i = 0; i < adapter->num_active_queues; i++) { struct iavf_ring *ring; - /* Avoid accessing un-allocated queues */ - ring = (i < adapter->num_active_queues ? - &adapter->tx_rings[i] : NULL); + /* Tx rings stats */ + ring = &adapter->tx_rings[i]; iavf_add_queue_stats(&data, ring); - /* Avoid accessing un-allocated queues */ - ring = (i < adapter->num_active_queues ? 
- &adapter->rx_rings[i] : NULL); + /* Rx rings stats */ + ring = &adapter->rx_rings[i]; iavf_add_queue_stats(&data, ring); } rcu_read_unlock(); @@ -407,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) iavf_add_stat_strings(&data, iavf_gstrings_stats); - /* Queues are always allocated in pairs, so we just use num_tx_queues - * for both Tx and Rx queues. + /* Queues are always allocated in pairs, so we just use + * real_num_tx_queues for both Tx and Rx queues. */ - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, "tx", i); iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, @@ -623,23 +631,44 @@ static int iavf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_tx_count = clamp_t(u32, ring->tx_pending, - IAVF_MIN_TXD, - IAVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (ring->tx_pending > IAVF_MAX_TXD || + ring->tx_pending < IAVF_MIN_TXD || + ring->rx_pending > IAVF_MAX_RXD || + ring->rx_pending < IAVF_MIN_RXD) { + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", + ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD, + IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_tx_count != ring->tx_pending) + netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", + new_tx_count); - new_rx_count = clamp_t(u32, ring->rx_pending, - IAVF_MIN_RXD, - IAVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_rx_count != ring->rx_pending) + netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", + new_rx_count); /* if nothing to do return success */ if ((new_tx_count == adapter->tx_desc_count) && - (new_rx_count == adapter->rx_desc_count)) + (new_rx_count == adapter->rx_desc_count)) { + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; + } - adapter->tx_desc_count = new_tx_count; - adapter->rx_desc_count = new_rx_count; + if (new_tx_count != adapter->tx_desc_count) { + netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n", + adapter->tx_desc_count, new_tx_count); + adapter->tx_desc_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_desc_count) { + netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n", + adapter->rx_desc_count, new_rx_count); + adapter->rx_desc_count = new_rx_count; + } if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; @@ -1910,7 +1939,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @key: hash key * @hfunc: hash function to use * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. 
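The iavf_set_rxfh() rework in the next hunk makes both inputs optional: a NULL key or a NULL indirection table means "leave that half alone", and a call with neither is a successful no-op. A standalone sketch of that contract, with simplified types and made-up sizes:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    #define RSS_KEY_SIZE 52
    #define RSS_LUT_SIZE 64

    struct rss_cfg {
        uint8_t key[RSS_KEY_SIZE];
        uint8_t lut[RSS_LUT_SIZE];
    };

    /* Accept the key and/or the indirection table; a call with neither
     * succeeds without touching the configuration, matching the reworked
     * iavf_set_rxfh() semantics. */
    static int set_rxfh(struct rss_cfg *cfg, const uint32_t *indir,
                        const uint8_t *key)
    {
        size_t i;

        if (!key && !indir)
            return 0;
        if (key)
            memcpy(cfg->key, key, RSS_KEY_SIZE);
        if (indir) {
            /* each 32-bit entry is stored as one LUT byte */
            for (i = 0; i < RSS_LUT_SIZE; i++)
                cfg->lut[i] = (uint8_t)indir[i];
        }
        return 0;    /* the real driver reprograms RSS at this point */
    }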
**/ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, @@ -1919,19 +1948,21 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Only support toeplitz hash function */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (!indir) + + if (!key && !indir) return 0; if (key) memcpy(adapter->rss_key, key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); + } return iavf_config_rss(adapter); } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 14934a7a13ef..3a820b3ff466 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -463,14 +463,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%u", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%u", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%u", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; @@ -2248,6 +2248,7 @@ static void iavf_reset_task(struct work_struct *work) } pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", @@ -2910,7 +2911,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2920,7 +2921,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2955,7 +2956,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", match.mask->vlan_id); - return IAVF_ERR_CONFIG; + return -EINVAL; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); @@ -2979,7 +2980,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2989,13 +2990,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); @@ 
-3016,7 +3017,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); - return IAVF_ERR_CONFIG; + return -EINVAL; } /* src and dest IPv6 address should not be LOOPBACK @@ -3026,7 +3027,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@ -3051,7 +3052,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(match.mask->src)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -3061,7 +3062,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (match.key->dst) { @@ -3428,6 +3429,8 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) { struct iavf_adapter *adapter = netdev_priv(netdev); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { iavf_notify_client_l2_params(&adapter->vsi); @@ -3998,6 +4001,7 @@ static void iavf_remove(struct pci_dev *pdev) if (iavf_lock_timeout(&adapter->crit_lock, 5000)) dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + dev_info(&adapter->pdev->dev, "Removing device\n"); /* Shut down all the garbage mashers on the detention level */ iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 3525eab8e9f9..42c9f9dc235c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1766,7 +1766,7 @@ tx_only: if (likely(napi_complete_done(napi, work_done))) iavf_update_enable_itr(vsi, q_vector); - return min(work_done, budget - 1); + return min_t(int, work_done, budget - 1); } /** diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index d60bf7c21200..cfa1f0e0e2fe 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -762,15 +762,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) if (flags & FLAG_VF_MULTICAST_PROMISC) { adapter->flags |= IAVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", + adapter->netdev->name); } if (!flags) { - adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | - IAVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | - IAVF_FLAG_AQ_RELEASE_ALLMULTI); - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + if (adapter->flags & IAVF_FLAG_PROMISC_ON) { + adapter->flags &= ~IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + + if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { + adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; + dev_info(&adapter->pdev->dev, "%s is 
leaving multicast promiscuous mode\n", + adapter->netdev->name); + } } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; @@ -1017,7 +1025,7 @@ print_link_msg: } else if (link_speed_mbps == SPEED_UNKNOWN) { snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); } else { - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s", + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps, "Mbps"); } @@ -1522,7 +1530,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: - dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b67ad51cbcc9..6be7bc87f70c 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -848,7 +848,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup); int ice_plug_aux_dev(struct ice_pf *pf); void ice_unplug_aux_dev(struct ice_pf *pf); int ice_init_rdma(struct ice_pf *pf); -const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); bool ice_is_wol_supported(struct ice_hw *hw); int diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 1efc635cc0f5..44bdd0ed1629 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -759,7 +759,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_channel *ch = ring->ch; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u16 pf_q; u8 tc; @@ -804,9 +804,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ring->q_handle, 1, qg_buf, buf_len, NULL); if (status) { - dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n", + status); + return status; } /* Add Tx Queue TEID into the VSI Tx ring from the @@ -929,7 +929,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, struct ice_pf *pf = vsi->back; struct ice_q_vector *q_vector; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u32 val; /* clear cause_ena bit for disabled queues */ @@ -953,18 +953,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, rel_vmvf_num, NULL); /* if the disable queue command was exercised during an - * active reset flow, ICE_ERR_RESET_ONGOING is returned. + * active reset flow, -EBUSY is returned. * This is not an error as the reset operation disables * queues at the hardware level anyway. */ - if (status == ICE_ERR_RESET_ONGOING) { + if (status == -EBUSY) { dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. 
LAN Tx queues already disabled\n"); - } else if (status == ICE_ERR_DOES_NOT_EXIST) { + } else if (status == -ENOENT) { dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); } else if (status) { - dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n", + status); + return status; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index b3066d0fea8b..cc8297e1db3a 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2,7 +2,6 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice_common.h" -#include "ice_lib.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" @@ -16,10 +15,10 @@ * This function sets the MAC type of the adapter based on the * vendor ID and device ID stored in the HW structure. */ -static enum ice_status ice_set_mac_type(struct ice_hw *hw) +static int ice_set_mac_type(struct ice_hw *hw) { if (hw->vendor_id != PCI_VENDOR_ID_INTEL) - return ICE_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; switch (hw->device_id) { case ICE_DEV_ID_E810C_BACKPLANE: @@ -99,7 +98,7 @@ bool ice_is_e810t(struct ice_hw *hw) * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port * configuration, flow director filters, etc.). */ -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) +int ice_clear_pf_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -123,21 +122,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) * ice_discover_dev_caps is expected to be called before this function is * called. */ -static enum ice_status +static int ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_read_resp *resp; struct ice_aqc_manage_mac_read *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; u16 flags; u8 i; cmd = &desc.params.mac_read; if (buf_size < sizeof(*resp)) - return ICE_ERR_BUF_TOO_SHORT; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); @@ -150,7 +149,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); - return ICE_ERR_CFG; + return -EIO; } /* A single port can report up to two (LAN and WoL) addresses */ @@ -176,7 +175,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, * * Returns the various PHY capabilities supported on the Port (0x0600) */ -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) @@ -184,18 +183,18 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps *cmd; u16 pcaps_size = sizeof(*pcaps); struct ice_aq_desc desc; - enum ice_status status; struct ice_hw *hw; + int status; cmd = &desc.params.get_phy; if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; if (report_mode == ICE_AQC_REPORT_DFLT_CFG && !ice_fw_supports_report_dflt_cfg(hw)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); @@ -252,7 +251,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, * returns error 
(ENOENT), then no cage present. If no cage present, then * connection type is backplane or BASE-T. */ -static enum ice_status +static int ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, struct ice_sq_cd *cd) { @@ -418,7 +417,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) * * Get Link Status (0x607). Returns the link status of the adapter. */ -enum ice_status +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { @@ -429,12 +428,12 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; - enum ice_status status; struct ice_hw *hw; u16 cmd_flags; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; @@ -556,7 +555,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, * * Set MAC configuration (0x0603) */ -enum ice_status +int ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_cfg *cmd; @@ -565,7 +564,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) cmd = &desc.params.set_mac_cfg; if (max_frame_size == 0) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); @@ -580,17 +579,17 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ -static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) +static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; - enum ice_status status; + int status; hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->switch_info), GFP_KERNEL); sw = hw->switch_info; if (!sw) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; @@ -666,17 +665,17 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) * ice_get_fw_log_cfg - get FW logging configuration * @hw: pointer to the HW struct */ -static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) +static int ice_get_fw_log_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; - enum ice_status status; __le16 *config; + int status; u16 size; size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); if (!config) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); @@ -738,15 +737,15 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) * messages from FW to SW. Interrupts are typically disabled during the device's * initialization phase. 
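A theme running through the ice hunks is converting `enum ice_status` returns (ICE_ERR_NO_MEMORY, ICE_ERR_PARAM, ICE_ERR_CFG, ...) into plain negative errnos, which lets later hunks delete translation shims such as ice_status_to_errno() at the call sites. A toy model of the mapping such a shim performed; the legacy_status enum is illustrative, not the real ice_status:

    #include <errno.h>

    enum legacy_status {    /* illustrative subset, not the full enum */
        LEGACY_OK = 0,
        LEGACY_ERR_PARAM,
        LEGACY_ERR_NO_MEMORY,
        LEGACY_ERR_CFG,
    };

    /* The shim every caller needed before the conversion; once functions
     * return errnos directly, this whole layer disappears. */
    static int legacy_status_to_errno(enum legacy_status s)
    {
        switch (s) {
        case LEGACY_OK:
            return 0;
        case LEGACY_ERR_PARAM:
            return -EINVAL;
        case LEGACY_ERR_NO_MEMORY:
            return -ENOMEM;
        case LEGACY_ERR_CFG:
            return -EIO;
        }
        return -EIO;
    }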
*/ -static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) +static int ice_cfg_fw_log(struct ice_hw *hw, bool enable) { struct ice_aqc_fw_logging *cmd; - enum ice_status status = 0; u16 i, chgs = 0, len = 0; struct ice_aq_desc desc; __le16 *data = NULL; u8 actv_evnts = 0; void *buf = NULL; + int status = 0; if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) return 0; @@ -790,7 +789,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) sizeof(*data), GFP_KERNEL); if (!data) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } val = i << ICE_AQC_FW_LOG_ID_S; @@ -904,12 +903,12 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ -enum ice_status ice_init_hw(struct ice_hw *hw) +int ice_init_hw(struct ice_hw *hw) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; u16 mac_buf_len; void *mac_buf; + int status; /* Set MAC type based on DeviceID */ status = ice_set_mac_type(hw); @@ -956,7 +955,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->port_info), GFP_KERNEL); if (!hw->port_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_cqinit; } @@ -985,7 +984,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_sched; } @@ -1006,7 +1005,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* need a valid SW entry point to build a Tx tree */ if (!hw->sw_entry_point_layer) { ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); - status = ICE_ERR_CFG; + status = -EIO; goto err_unroll_sched; } INIT_LIST_HEAD(&hw->agg_list); @@ -1026,7 +1025,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_fltr_mgmt_struct; } @@ -1096,7 +1095,7 @@ void ice_deinit_hw(struct ice_hw *hw) * ice_check_reset - Check to see if a global reset is complete * @hw: pointer to the hardware structure */ -enum ice_status ice_check_reset(struct ice_hw *hw) +int ice_check_reset(struct ice_hw *hw) { u32 cnt, reg = 0, grst_timeout, uld_mask; @@ -1116,7 +1115,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ @@ -1143,7 +1142,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. 
GLNVM_ULD = 0x%x\n", reg); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1156,7 +1155,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) * If a global reset has been triggered, this function checks * for its completion and then issues the PF reset */ -static enum ice_status ice_pf_reset(struct ice_hw *hw) +static int ice_pf_reset(struct ice_hw *hw) { u32 cnt, reg; @@ -1169,7 +1168,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { /* poll on global reset currently in progress until done */ if (ice_check_reset(hw)) - return ICE_ERR_RESET_FAILED; + return -EIO; return 0; } @@ -1194,7 +1193,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1212,7 +1211,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) * This has to be cleared using ice_clear_pxe_mode again, once the AQ * interface has been restored in the rebuild flow. */ -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) +int ice_reset(struct ice_hw *hw, enum ice_reset_req req) { u32 val = 0; @@ -1228,7 +1227,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) val = GLGEN_RTRIG_GLOBR_M; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } val |= rd32(hw, GLGEN_RTRIG); @@ -1247,16 +1246,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) * * Copies rxq context from dense structure to HW register space */ -static enum ice_status +static int ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) { u8 i; if (!ice_rxq_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (rxq_index > QRX_CTRL_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Copy each dword separately to HW */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { @@ -1306,14 +1305,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * it to HW register space and enables the hardware to prefetch descriptors * instead of only fetching them on demand */ -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; if (!rlan_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; rlan_ctx->prefena = 1; @@ -1369,9 +1368,8 @@ static int ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw), - (struct ice_aq_desc *)desc, - buf, buf_size, cd)); + return ice_sq_send_cmd(hw, ice_get_sbq(hw), + (struct ice_aq_desc *)desc, buf, buf_size, cd); } /** @@ -1453,17 +1451,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode) * Retry sending the FW Admin Queue command, multiple times, to the FW Admin * Queue if the EBUSY AQ error is returned. 
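ice_sq_send_cmd_retry() snapshots the descriptor (and any indirect buffer) before the first send, so each retry after a firmware busy indication starts from unmodified input rather than from a descriptor the previous attempt already rewrote with response data. A minimal sketch of that snapshot-and-retry loop, with a stub send_cmd() standing in for the admin-queue send:

    #include <errno.h>
    #include <string.h>

    #define MAX_RETRIES 10

    struct cmd_desc { unsigned char raw[32]; };

    /* Stub standing in for the firmware send; the real one may scribble
     * response data over the descriptor it is given. */
    static int send_cmd(struct cmd_desc *desc)
    {
        (void)desc;
        return -EBUSY;
    }

    /* Keep a pristine copy so a retry never resends a descriptor the
     * previous attempt already modified. */
    static int send_cmd_retry(struct cmd_desc *desc)
    {
        struct cmd_desc desc_cpy;
        int status, idx = 0;

        memcpy(&desc_cpy, desc, sizeof(desc_cpy));
        do {
            status = send_cmd(desc);
            if (status != -EBUSY)
                break;
            memcpy(desc, &desc_cpy, sizeof(*desc));
        } while (++idx < MAX_RETRIES);

        return status;
    }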
*/ -static enum ice_status +static int ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc_cpy; - enum ice_status status; bool is_cmd_for_retry; u8 *buf_cpy = NULL; u8 idx = 0; u16 opcode; + int status; opcode = le16_to_cpu(desc->opcode); is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); @@ -1473,7 +1471,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf) { buf_cpy = kzalloc(buf_size, GFP_KERNEL); if (!buf_cpy) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&desc_cpy, desc, sizeof(desc_cpy)); @@ -1510,13 +1508,13 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, * * Helper function to send FW Admin Queue commands to the FW Admin Queue. */ -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd = &desc->params.res_owner; bool lock_acquired = false; - enum ice_status status; + int status; /* When a package download is in process (i.e. when the firmware's * Global Configuration Lock resource is held), only the Download @@ -1555,11 +1553,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, * * Get the firmware version (0x0001) from the admin queue commands */ -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) { struct ice_aqc_get_ver *resp; struct ice_aq_desc desc; - enum ice_status status; + int status; resp = &desc.params.get_ver; @@ -1590,7 +1588,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) * * Send the driver version (0x0002) to the firmware */ -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd) { @@ -1601,7 +1599,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, cmd = &desc.params.driver_ver; if (!dv) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); @@ -1627,7 +1625,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well (0x0003). */ -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) { struct ice_aqc_q_shutdown *cmd; struct ice_aq_desc desc; @@ -1654,12 +1652,12 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * Requests common resource using the admin queue commands (0x0008). 
* When attempting to acquire the Global Config Lock, the driver can * learn of three states: - * 1) ICE_SUCCESS - acquired lock, and can perform download package - * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load - * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has - * successfully downloaded the package; the driver does - * not have to download the package and can continue - * loading + * 1) 0 - acquired lock, and can perform download package + * 2) -EIO - did not get lock, driver should fail to load + * 3) -EALREADY - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading * * Note that if the caller is in an acquire lock, perform action, release lock * phase of operation, it is possible that the FW may detect a timeout and issue @@ -1668,14 +1666,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * will likely get an error propagated back to it indicating the Download * Package, Update Package or the Release Resource AQ commands timed out. */ -static enum ice_status +static int ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd_resp = &desc.params.res_owner; @@ -1707,15 +1705,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_IN_PROG) { *timeout = le32_to_cpu(cmd_resp->timeout); - return ICE_ERR_AQ_ERROR; + return -EIO; } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_DONE) { - return ICE_ERR_AQ_NO_WORK; + return -EALREADY; } /* invalid FW response, force a timeout immediately */ *timeout = 0; - return ICE_ERR_AQ_ERROR; + return -EIO; } /* If the resource is held by some other driver, the command completes @@ -1737,7 +1735,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, * * release common resource using the admin queue commands (0x0009) */ -static enum ice_status +static int ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, struct ice_sq_cd *cd) { @@ -1763,23 +1761,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * * This function will attempt to acquire the ownership of a resource. */ -enum ice_status +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; u32 time_left = timeout; - enum ice_status status; + int status; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + /* A return code of -EALREADY means that another driver has * previously acquired the resource and performed any necessary updates; * in this case the caller does not obtain the resource and has no * further work to do. */ - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) goto ice_acquire_res_exit; if (status) @@ -1792,7 +1790,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? 
timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) /* lock free, but no work to do */ break; @@ -1800,15 +1798,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, /* lock acquired */ break; } - if (status && status != ICE_ERR_AQ_NO_WORK) + if (status && status != -EALREADY) ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); ice_acquire_res_exit: - if (status == ICE_ERR_AQ_NO_WORK) { + if (status == -EALREADY) { if (access == ICE_RES_WRITE) ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else - ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); + ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); } return status; } @@ -1822,16 +1820,15 @@ ice_acquire_res_exit: */ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) { - enum ice_status status; u32 total_delay = 0; + int status; status = ice_aq_release_res(hw, res, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin queue timeout, so handle them correctly */ - while ((status == ICE_ERR_AQ_TIMEOUT) && - (total_delay < hw->adminq.sq_cmd_timeout)) { + while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) { mdelay(1); status = ice_aq_release_res(hw, res, 0, NULL); total_delay++; @@ -1849,7 +1846,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) * * Helper function to allocate/free resources using the admin queue commands */ -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd) @@ -1860,10 +1857,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, cmd = &desc.params.sw_res_ctrl; if (!buf) - return ICE_ERR_PARAM; + return -EINVAL; if (buf_size < flex_array_size(buf, elem, num_entries)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -1882,17 +1879,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, * @btm: allocate from bottom * @res: pointer to array that will receive the resources */ -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to allocate resource. */ buf->num_elems = cpu_to_le16(num); @@ -1920,16 +1917,16 @@ ice_alloc_res_exit: * @num: number of resources * @res: pointer to array that contains the resources to free */ -enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to free resource. */ buf->num_elems = cpu_to_le16(num); @@ -2486,19 +2483,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that * firmware could return) to avoid this. 
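With the conversion above, a caller of ice_acquire_res() for the Global Config Lock sees three outcomes: 0 (lock held, perform the download), -EIO (did not get the lock, fail), and -EALREADY (another driver already downloaded the package, continue without error). A compact sketch of dispatching on that tri-state result; acquire_global_cfg_lock() and download_package() are hypothetical stand-ins:

    #include <errno.h>
    #include <stdio.h>

    static int acquire_global_cfg_lock(void);  /* 0, -EIO or -EALREADY */

    static int download_package(void)
    {
        int err = acquire_global_cfg_lock();

        if (err == -EALREADY) {
            /* another driver already did the work; nothing to do */
            return 0;
        }
        if (err) {
            fprintf(stderr, "config lock: %d\n", err);
            return err;    /* -EIO: fail the load */
        }
        /* ... perform the download, then release the lock ... */
        return 0;
    }

    static int acquire_global_cfg_lock(void)
    {
        return -EALREADY;    /* stub: pretend another PF won the race */
    }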
*/ -enum ice_status +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_cap; if (opc != ice_aqc_opc_list_func_caps && opc != ice_aqc_opc_list_dev_caps) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); @@ -2517,16 +2514,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * Read the device capabilities and extract them into the dev_caps structure * for later use. */ -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2551,16 +2548,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) * Read the function capabilities and extract them into the func_caps structure * for later use. */ -static enum ice_status +static int ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2650,9 +2647,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw) * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ -enum ice_status ice_get_caps(struct ice_hw *hw) +int ice_get_caps(struct ice_hw *hw) { - enum ice_status status; + int status; status = ice_discover_dev_caps(hw, &hw->dev_caps); if (status) @@ -2670,7 +2667,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw) * * This function is used to write MAC address to the NVM (0x0108). */ -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd) { @@ -2692,7 +2689,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, * * Tell the firmware that the driver is taking over from PXE (0x0110). */ -static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) +static int ice_aq_clear_pxe_mode(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -2903,15 +2900,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601). */ -enum ice_status +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (!cfg) - return ICE_ERR_PARAM; + return -EINVAL; /* Ensure that only valid bits of cfg->caps can be turned on. 
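The set-PHY-config path that follows guards cfg->caps against bits outside ICE_AQ_PHY_ENA_VALID_MASK, alongside the usual NULL-pointer and -EINVAL parameter checks. One way to express that kind of guard in isolation; the mask value below is illustrative only:

    #include <errno.h>
    #include <stdint.h>

    #define PHY_ENA_VALID_MASK 0xefU    /* illustrative set of defined bits */

    /* Refuse a request that tries to turn on capability bits the
     * firmware does not define, per the comment in ice_aq_set_phy_cfg().
     */
    static int check_phy_caps(uint8_t caps)
    {
        if (caps & ~PHY_ENA_VALID_MASK)
            return -EINVAL;
        return 0;
    }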
*/ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { @@ -2952,13 +2949,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, * ice_update_link_info - update status of the HW network link * @pi: port info structure of the interested logical port */ -enum ice_status ice_update_link_info(struct ice_port_info *pi) +int ice_update_link_info(struct ice_port_info *pi) { struct ice_link_status *li; - enum ice_status status; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; li = &pi->phy.link_info; @@ -2974,7 +2971,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); @@ -3070,7 +3067,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) * @cfg: PHY configuration data to set FC mode * @req_mode: FC mode to configure */ -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) { @@ -3078,7 +3075,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, u8 pause_mask = 0x0; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; switch (req_mode) { case ICE_FC_FULL: @@ -3117,23 +3114,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, * * Set the requested flow control mode. */ -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || !aq_failures) - return ICE_ERR_BAD_PTR; + return -EINVAL; *aq_failures = 0; hw = pi->hw; pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Get the current PHY config */ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, @@ -3258,22 +3255,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, * @cfg: PHY configuration data to set FEC mode * @fec: FEC mode to configure */ -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; hw = pi->hw; pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, (ice_fw_supports_report_dflt_cfg(hw) ? @@ -3313,7 +3310,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, cfg->link_fec_opt |= pcaps->link_fec_options; break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; break; } @@ -3344,13 +3341,13 @@ out: * The variable link_up is invalid if status is non zero. As a * result of this call, link status reporting becomes enabled */ -enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) +int ice_get_link_status(struct ice_port_info *pi, bool *link_up) { struct ice_phy_info *phy_info; - enum ice_status status = 0; + int status = 0; if (!pi || !link_up) - return ICE_ERR_PARAM; + return -EINVAL; phy_info = &pi->phy; @@ -3375,7 +3372,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) * * Sets up the link and restarts the Auto-Negotiation over the link. 
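The RSS accessors in the following hunks keep one internal worker taking a bool `set` (__ice_aq_get_set_rss_lut(), __ice_aq_get_set_rss_key()), with thin public get/set wrappers choosing the direction. A standalone sketch of that single-worker, two-wrapper shape, using an in-memory table instead of admin-queue commands:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static uint8_t stored_lut[64];

    /* One worker handles both directions; the wrappers just pick one,
     * so validation lives in a single place. */
    static int get_set_lut(uint8_t *lut, size_t len, bool set)
    {
        if (!lut || len != sizeof(stored_lut))
            return -EINVAL;
        if (set)
            memcpy(stored_lut, lut, len);
        else
            memcpy(lut, stored_lut, len);
        return 0;
    }

    static int get_lut(uint8_t *lut, size_t len)
    {
        return get_set_lut(lut, len, false);
    }

    static int set_lut(uint8_t *lut, size_t len)
    {
        return get_set_lut(lut, len, true);
    }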
*/ -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd) { @@ -3405,7 +3402,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, * * Set event mask (0x0613) */ -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd) { @@ -3430,7 +3427,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, * * Enable/disable loopback on a given port */ -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_lb *cmd; @@ -3453,7 +3450,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) * * Set LED value for the given port (0x06e9) */ -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd) { @@ -3488,17 +3485,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, * * Read/Write SFF EEPROM (0x06EE) */ -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd) { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || (mem_addr & 0xff00)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); cmd = &desc.params.read_write_sff_param; @@ -3527,23 +3524,23 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, * * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ -static enum ice_status +static int __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) { u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; struct ice_aqc_get_set_rss_lut *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; u8 *lut; if (!params) - return ICE_ERR_PARAM; + return -EINVAL; vsi_handle = params->vsi_handle; lut = params->lut; if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) - return ICE_ERR_PARAM; + return -EINVAL; lut_size = params->lut_size; lut_type = params->lut_type; @@ -3572,7 +3569,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -3607,7 +3604,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params } fallthrough; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -3626,7 +3623,7 @@ ice_aq_get_set_rss_lut_exit: * * get the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) { return __ice_aq_get_set_rss_lut(hw, get_params, false); @@ -3639,7 +3636,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_ * * set the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) { return __ice_aq_get_set_rss_lut(hw, set_params, true); @@ -3654,10 +3651,9 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_ * * get (0x0B04) or set (0x0B02) the RSS key per VSI */ -static enum -ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, - struct ice_aqc_get_set_rss_keys *key, - bool set) +static int 
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *key, bool set) { struct ice_aqc_get_set_rss_key *cmd_resp; u16 key_size = sizeof(*key); @@ -3688,12 +3684,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, * * get the RSS key per VSI */ -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { if (!ice_is_vsi_valid(hw, vsi_handle) || !key) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), key, false); @@ -3707,12 +3703,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, * * set the RSS key per VSI */ -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), keys, true); @@ -3739,7 +3735,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue * flow. */ -static enum ice_status +static int ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd) @@ -3754,10 +3750,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); if (!qg_list) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; for (i = 0, list = qg_list; i < num_qgrps; i++) { sum_size += struct_size(list, txqs, list->num_txqs); @@ -3766,7 +3762,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sum_size) - return ICE_ERR_PARAM; + return -EINVAL; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -3787,7 +3783,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * * Disable LAN Tx queue (0x0C31) */ -static enum ice_status +static int ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, enum ice_disq_rst_src rst_src, u16 vmvf_num, @@ -3796,18 +3792,18 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 i, sz = 0; + int status; cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); /* qg_list can be NULL only in VM/VF reset flow */ if (!qg_list && !rst_src) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; cmd->num_entries = num_qgrps; @@ -3856,7 +3852,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sz) - return ICE_ERR_PARAM; + return -EINVAL; do_aq: status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); @@ -3914,8 +3910,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, cmd->num_qset_grps = num_qset_grps; - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list, - buf_size, cd)); + return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); } /* End of FW Admin Queue command wrappers */ @@ -4111,7 +4106,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct 
ice_ctx_ele *ce_info) { @@ -4141,7 +4136,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } } @@ -4185,7 +4180,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) * * This function adds one LAN queue */ -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -4193,19 +4188,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_sched_node *parent; struct ice_q_ctx *q_ctx; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (num_qgrps > 1 || buf->num_txqs > 1) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -4213,7 +4208,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", q_handle); - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -4221,7 +4216,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -4290,20 +4285,20 @@ ena_txq_exit: * * This function removes queues and their corresponding nodes in SW DB */ -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handles, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { - enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item *qg_list; struct ice_q_ctx *q_ctx; + int status = -ENOENT; struct ice_hw *hw; u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; hw = pi->hw; @@ -4315,13 +4310,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, if (rst_src) return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, vmvf_num, NULL); - return ICE_ERR_CFG; + return -EIO; } buf_size = struct_size(qg_list, q_id, 1); qg_list = kzalloc(buf_size, GFP_KERNEL); if (!qg_list) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&pi->sched_lock); @@ -4368,18 +4363,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, * * This function adds/updates the VSI queues per TC. */ -static enum ice_status +static int ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *maxqs, u8 owner) { - enum ice_status status = 0; + int status = 0; u8 i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -4407,7 +4402,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, * * This function adds/updates the VSI LAN queues per TC. 
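The queue enable/disable paths keep their existing control flow, taking pi->sched_lock and unwinding through a single exit label; only the type of status and the literal error values change. A minimal sketch of that lock/goto-unwind shape, using a pthread mutex in place of the kernel mutex and a hypothetical validity check:

/* Lock/goto-unwind shape of the queue enable/disable paths above;
 * types and checks are stand-ins, not driver code. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static bool handle_valid(int q_handle) { return q_handle >= 0; }

static int example_ena_txq(int q_handle)
{
	int status = 0;

	pthread_mutex_lock(&sched_lock);

	if (!handle_valid(q_handle)) {
		status = -EINVAL;	/* was: status = ICE_ERR_PARAM */
		goto ena_txq_exit;
	}

	/* ... program the queue while holding the lock ... */

ena_txq_exit:
	pthread_mutex_unlock(&sched_lock);
	return status;
}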
*/ -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs) { @@ -4428,9 +4423,8 @@ int ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_rdmaqs) { - return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, - max_rdmaqs, - ICE_SCHED_NODE_OWNER_RDMA)); + return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, + ICE_SCHED_NODE_OWNER_RDMA); } /** @@ -4451,7 +4445,6 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_aqc_add_rdma_qset_data *buf; struct ice_sched_node *parent; - enum ice_status status; struct ice_hw *hw; u16 i, buf_size; int ret; @@ -4502,12 +4495,10 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; for (i = 0; i < num_qsets; i++) { node.node_teid = buf->rdma_qsets[i].qset_teid; - status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, - &node); - if (status) { - ret = ice_status_to_errno(status); + ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, + &node); + if (ret) break; - } qset_teid[i] = le32_to_cpu(node.node_teid); } rdma_error_exit: @@ -4528,8 +4519,8 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id) { struct ice_aqc_dis_txq_item *qg_list; - enum ice_status status = 0; struct ice_hw *hw; + int status = 0; u16 qg_size; int i; @@ -4568,7 +4559,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, mutex_unlock(&pi->sched_lock); kfree(qg_list); - return ice_status_to_errno(status); + return status; } /** @@ -4577,7 +4568,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, * * Initializes required config data for VSI, FD, ACL, and RSS before replay. */ -static enum ice_status ice_replay_pre_init(struct ice_hw *hw) +static int ice_replay_pre_init(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; u8 i; @@ -4604,12 +4595,12 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw) * Restore all VSI configuration after reset. It is required to call this * function with main VSI first. 
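The RDMA wrappers also drop their ice_status_to_errno() calls: once the callee itself returns a negative errno, the translation shim is an identity and the wrapper can return the callee's result directly. A sketch of the before/after, where inner_cfg() is a hypothetical stand-in for ice_cfg_vsi_qs():

/* Why the ice_status_to_errno() calls disappear: the callee is
 * already errno-based, so the wrapper passes its result through. */
#include <errno.h>

static int inner_cfg(int vsi_handle)
{
	return vsi_handle < 0 ? -EINVAL : 0;	/* already errno-based */
}

/* before: return ice_status_to_errno(inner_cfg(vsi_handle)); */
static int example_cfg_vsi_rdma(int vsi_handle)
{
	return inner_cfg(vsi_handle);		/* after: pass through */
}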
*/ -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Replay pre-initialization if there is any */ if (vsi_handle == ICE_MAIN_VSI_HANDLE) { @@ -4725,12 +4716,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, * * This function queries HW element information */ -enum ice_status +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf) { u16 buf_size, num_elem_ret = 0; - enum ice_status status; + int status; buf_size = sizeof(*buf); memset(buf, 0, buf_size); @@ -4775,7 +4766,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, cmd->param_indx = idx; cmd->param_val = cpu_to_le32(value); - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** @@ -4796,7 +4787,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, { struct ice_aqc_driver_shared_params *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (idx >= ICE_AQC_DRIVER_PARAM_MAX) return -EIO; @@ -4810,7 +4801,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; *value = le32_to_cpu(cmd->param_val); @@ -4840,7 +4831,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, cmd->gpio_num = pin_idx; cmd->gpio_val = value ? 1 : 0; - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** @@ -4860,7 +4851,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, { struct ice_aqc_gpio *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); cmd = &desc.params.read_write_gpio; @@ -4869,7 +4860,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; *value = !!cmd->gpio_val; return 0; @@ -4903,13 +4894,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw) * * Gets the link default override for a port */ -enum ice_status +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi) { u16 i, tlv, tlv_len, tlv_start, buf, offset; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); @@ -4994,7 +4985,7 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) * * Set the LLDP MIB. 
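The GPIO and driver-parameter getters above share a "send, propagate, decode" shape: send the AQ command, return any nonzero status unchanged, and decode the response only on success. A standalone sketch with hypothetical stand-ins for ice_aq_send_cmd() and the AQ descriptor:

/* "Send, propagate, decode" shape of the getters above; send_cmd()
 * and struct resp are hypothetical stand-ins. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct resp { uint8_t gpio_val; };

static int send_cmd(struct resp *r) { r->gpio_val = 1; return 0; }

static int example_get_gpio(bool *value)
{
	struct resp r = { 0 };
	int status;

	status = send_cmd(&r);
	if (status)
		return status;	/* errno propagates unchanged */

	*value = !!r.gpio_val;	/* decode only on success */
	return 0;
}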
(0x0A08) */ -enum ice_status +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -5004,7 +4995,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, cmd = &desc.params.lldp_set_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); @@ -5044,7 +5035,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) * @vsi_num: absolute HW index for VSI * @add: boolean for if adding or removing a filter */ -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 65c1b3244264..1c57097ddf0b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -14,108 +14,108 @@ #define ICE_SQ_SEND_DELAY_TIME_MS 10 #define ICE_SQ_SEND_MAX_EXECUTE 3 -enum ice_status ice_init_hw(struct ice_hw *hw); +int ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); -enum ice_status ice_check_reset(struct ice_hw *hw); -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); +int ice_check_reset(struct ice_hw *hw); +int ice_reset(struct ice_hw *hw, enum ice_reset_req req); +int ice_create_all_ctrlq(struct ice_hw *hw); +int ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); void ice_destroy_all_ctrlq(struct ice_hw *hw); -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); -enum ice_status +int ice_get_link_status(struct ice_port_info *pi, bool *link_up); -enum ice_status ice_update_link_info(struct ice_port_info *pi); -enum ice_status +int ice_update_link_info(struct ice_port_info *pi); +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); -enum ice_status +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd); bool ice_is_sbq_supported(struct ice_hw *hw); struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw); -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); void ice_clear_pxe_mode(struct ice_hw *hw); -enum ice_status ice_get_caps(struct ice_hw *hw); +int ice_get_caps(struct ice_hw *hw); void ice_set_safe_mode_caps(struct ice_hw *hw); -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params); -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct 
ice_aqc_get_set_rss_keys *keys); bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); extern struct mutex ice_global_cfg_lock_sw; -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); bool ice_is_e810(struct ice_hw *hw); -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); -enum ice_status +int ice_clear_pf_cfg(struct ice_hw *hw); +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); bool ice_fw_supports_link_override(struct ice_hw *hw); -enum ice_status +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode fc); bool @@ -125,27 +125,27 @@ void ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); @@ -159,19 +159,19 @@ 
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, int ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id); -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd); -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs); -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * @@ -184,7 +184,7 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); bool ice_is_e810t(struct ice_hw *hw); -enum ice_status +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); int @@ -199,11 +199,11 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 03bdb125be36..6bcfee295991 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -87,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); @@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->sq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->sq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.desc_buf.size = size; cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, @@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.desc_buf.va = NULL; cq->sq.desc_buf.pa = 0; cq->sq.desc_buf.size = 0; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } return 0; @@ -118,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); @@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->rq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->rq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.desc_buf.size = size; return 0; } @@ -154,7 +154,7 @@ static void 
ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, sizeof(cq->rq.desc_buf), GFP_KERNEL); if (!cq->rq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; /* allocate the mapped buffers */ @@ -218,7 +218,7 @@ unwind_alloc_rq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); cq->rq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -226,7 +226,7 @@ unwind_alloc_rq_bufs: * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, sizeof(cq->sq.desc_buf), GFP_KERNEL); if (!cq->sq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; /* allocate the mapped buffers */ @@ -266,10 +266,10 @@ unwind_alloc_sq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); cq->sq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } -static enum ice_status +static int ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) { /* Clear Head and Tail */ @@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) /* Check one register to verify that config was applied */ if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa)) - return ICE_ERR_AQ_ERROR; + return -EIO; return 0; } @@ -295,8 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) * * Configure base address and length registers for the transmit queue */ -static enum ice_status -ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); } @@ -308,10 +307,9 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * * Configure base address and length registers for the receive (event queue) */ -static enum ice_status -ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status status; + int status; status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); if (status) @@ -361,19 +359,19 @@ do { \ * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ -static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->sq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_sq_entries || !cq->sq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -421,19 +419,19 @@ init_ctrlq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * 
called are not going to be atomic context safe */ -static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->rq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->rq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -474,15 +472,14 @@ init_ctrlq_exit: * * The main shutdown routine for the Control Transmit Queue */ -static enum ice_status -ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->sq_lock); if (!cq->sq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_sq_out; } @@ -541,15 +538,14 @@ static bool ice_aq_ver_check(struct ice_hw *hw) * * The main shutdown routine for the Control Receive Queue */ -static enum ice_status -ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->rq_lock); if (!cq->rq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_rq_out; } @@ -576,17 +572,17 @@ shutdown_rq_out: * ice_init_check_adminq - Check version for Admin Queue to know if its alive * @hw: pointer to the hardware structure */ -static enum ice_status ice_init_check_adminq(struct ice_hw *hw) +static int ice_init_check_adminq(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; - enum ice_status status; + int status; status = ice_aq_get_fw_ver(hw, NULL); if (status) goto init_ctrlq_free_rq; if (!ice_aq_ver_check(hw)) { - status = ICE_ERR_FW_API_VER; + status = -EIO; goto init_ctrlq_free_rq; } @@ -612,10 +608,10 @@ init_ctrlq_free_rq: * * NOTE: this function does not initialize the controlq locks */ -static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { struct ice_ctl_q_info *cq; - enum ice_status ret_code; + int ret_code; switch (q_type) { case ICE_CTL_Q_ADMIN: @@ -631,14 +627,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) cq = &hw->mailboxq; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } cq->qtype = q_type; /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->num_sq_entries || !cq->rq_buf_size || !cq->sq_buf_size) { - return ICE_ERR_CFG; + return -EIO; } /* setup SQ command write back timeout */ @@ -751,10 +747,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) * * NOTE: this function does not initialize the controlq locks. 
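In the control-queue code the conversion assigns distinct errno values by failure class: an already-initialized queue becomes -EBUSY, an invalid ring configuration becomes -EIO, and an unknown queue type becomes -EINVAL. A pared-down sketch of the init-time checks, where struct ctl_q is a stand-in for ice_ctl_q_info:

/* Init-time checks of the control queues above and the errno each
 * maps to after the conversion; the struct is a pared-down stand-in. */
#include <errno.h>
#include <stdint.h>

struct ctl_q {
	uint16_t count;		/* nonzero once initialized */
	uint16_t num_entries;
	uint16_t buf_size;
};

static int example_init_q(struct ctl_q *cq)
{
	if (cq->count > 0)			/* was: ICE_ERR_NOT_READY */
		return -EBUSY;			/* queue already initialized */

	if (!cq->num_entries || !cq->buf_size)	/* was: ICE_ERR_CFG */
		return -EIO;			/* invalid configuration */

	/* ... allocate rings and buffers ... */
	cq->count = cq->num_entries;
	return 0;
}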
*/ -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) +int ice_init_all_ctrlq(struct ice_hw *hw) { - enum ice_status status; u32 retry = 0; + int status; /* Init FW admin queue */ do { @@ -763,7 +759,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) return status; status = ice_init_check_adminq(hw); - if (status != ICE_ERR_AQ_FW_CRITICAL) + if (status != -EIO) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); @@ -814,7 +810,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) * driver needs to re-initialize control queues at run time it should call * ice_init_all_ctrlq instead. */ -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +int ice_create_all_ctrlq(struct ice_hw *hw) { ice_init_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) @@ -962,7 +958,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. */ -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -970,27 +966,27 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; - enum ice_status status = 0; struct ice_sq_cd *details; u32 total_delay = 0; + int status = 0; u16 retval = 0; u32 val = 0; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) - return ICE_ERR_RESET_ONGOING; + return -EBUSY; mutex_lock(&cq->sq_lock); cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } if ((buf && !buf_size) || (!buf && buf_size)) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto sq_send_command_error; } @@ -998,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf_size > cq->sq_buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto sq_send_command_error; } @@ -1011,7 +1007,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (val >= cq->num_sq_entries) { ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } @@ -1028,7 +1024,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, */ if (ice_clean_sq(hw, cq) == 0) { ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); - status = ICE_ERR_AQ_FULL; + status = -ENOSPC; goto sq_send_command_error; } @@ -1082,7 +1078,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (copy_size > buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); - status = ICE_ERR_AQ_ERROR; + status = -EIO; } else { memcpy(buf, dma_buf->va, copy_size); } @@ -1098,7 +1094,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } cmd_completed = true; if (!status && retval != ICE_AQ_RC_OK) - status = ICE_ERR_AQ_ERROR; + status = -EIO; cq->sq_last_status = (enum ice_aq_err)retval; } @@ -1116,10 +1112,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { ice_debug(hw, ICE_DBG_AQ_MSG, 
"Critical FW error.\n"); - status = ICE_ERR_AQ_FW_CRITICAL; + status = -EIO; } else { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); - status = ICE_ERR_AQ_TIMEOUT; + status = -EIO; } } @@ -1154,15 +1150,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) * the contents through e. It can also return how many events are * left to process through 'pending'. */ -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; enum ice_aq_err rq_last_status; - enum ice_status ret_code = 0; struct ice_aq_desc *desc; struct ice_dma_mem *bi; + int ret_code = 0; u16 desc_idx; u16 datalen; u16 flags; @@ -1176,7 +1172,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (!cq->rq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); - ret_code = ICE_ERR_AQ_EMPTY; + ret_code = -EIO; goto clean_rq_elem_err; } @@ -1185,7 +1181,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - ret_code = ICE_ERR_AQ_NO_WORK; + ret_code = -EALREADY; goto clean_rq_elem_out; } @@ -1196,7 +1192,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { - ret_code = ICE_ERR_AQ_ERROR; + ret_code = -EIO; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", le16_to_cpu(desc->opcode), rq_last_status); } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 241427cd9bc0..0b146a0d4205 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -2,7 +2,6 @@ /* Copyright (c) 2019, Intel Corporation. */ #include "ice_common.h" -#include "ice_lib.h" #include "ice_sched.h" #include "ice_dcb.h" @@ -19,19 +18,19 @@ * * Requests the complete LLDP MIB (entire packet). (0x0A00) */ -static enum ice_status +static int ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, u16 buf_size, u16 *local_len, u16 *remote_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_get_mib *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.lldp_get_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); @@ -61,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ -static enum ice_status +static int ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { @@ -89,7 +88,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, * * Stop or Shutdown the embedded LLDP Agent (0x0A05) */ -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd) { @@ -117,8 +116,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, * * Start the embedded LLDP Agent on all ports. 
(0x0A06) */ -enum ice_status -ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_start *cmd; struct ice_aq_desc desc; @@ -598,18 +596,17 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) * * Parse DCB configuration from the LLDPDU */ -static enum ice_status -ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) +static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) { struct ice_lldp_org_tlv *tlv; - enum ice_status ret = 0; u16 offset = 0; + int ret = 0; u16 typelen; u16 type; u16 len; if (!lldpmib || !dcbcfg) - return ICE_ERR_PARAM; + return -EINVAL; /* set to the start of LLDPDU */ lldpmib += ETH_HLEN; @@ -649,17 +646,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) * * Query DCB configuration from the firmware */ -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg) { - enum ice_status ret; u8 *lldpmib; + int ret; /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, ICE_LLDPDU_SIZE, NULL, NULL, NULL); @@ -684,17 +681,17 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, * @cd: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent. In case that this wrapper function - * returns ICE_SUCCESS, caller will need to check if FW returns back the same + * returns 0, caller will need to check if FW returns back the same * value as stated in dcbx_agent_status, and react accordingly. (0x0A09) */ -enum ice_status +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop_start_specific_agent *cmd; - enum ice_status status; struct ice_aq_desc desc; u16 opcode; + int status; cmd = &desc.params.lldp_agent_ctrl; @@ -724,7 +721,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, * * Get CEE DCBX mode operational configuration from firmware (0x0A07) */ -static enum ice_status +static int ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, struct ice_aqc_get_cee_dcb_cfg_resp *buff, struct ice_sq_cd *cd) @@ -749,7 +746,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) { struct ice_aqc_set_query_pfc_mode *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) return -EINVAL; @@ -762,7 +759,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is * disabled, FW will write back 0 to cmd->pfc_mode. 
After the AQ has @@ -903,14 +900,13 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, * * Get IEEE or CEE mode DCB configuration from the Firmware */ -static enum ice_status -ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) +static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) { struct ice_dcbx_cfg *dcbx_cfg = NULL; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; if (dcbx_mode == ICE_DCBX_MODE_IEEE) dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; @@ -943,14 +939,14 @@ out: * * Get DCB configuration from the Firmware */ -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) +int ice_get_dcb_cfg(struct ice_port_info *pi) { struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); if (!ret) { @@ -974,13 +970,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) * * Update DCB configuration from the Firmware */ -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret = 0; + int ret = 0; if (!hw->func_caps.common_cap.dcb) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; qos_cfg->is_sw_lldp = true; @@ -996,7 +992,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) return ret; qos_cfg->is_sw_lldp = false; } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) { - return ICE_ERR_NOT_READY; + return -EBUSY; } /* Configure the LLDP MIB change event */ @@ -1016,19 +1012,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) * * Configure (disable/enable) MIB */ -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret; + int ret; if (!hw->func_caps.common_cap.dcb) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; /* Get DCBX status */ qos_cfg->dcbx_status = ice_get_dcbx_status(hw); if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) - return ICE_ERR_NOT_READY; + return -EBUSY; ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) @@ -1469,16 +1465,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) * * Set DCB configuration to the Firmware */ -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) +int ice_set_dcb_cfg(struct ice_port_info *pi) { u8 mib_type, *lldpmib = NULL; struct ice_dcbx_cfg *dcbcfg; - enum ice_status ret; struct ice_hw *hw; u16 miblen; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -1487,7 +1483,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) @@ -1511,17 +1507,17 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) * * query current port ETS configuration */ -static enum ice_status +static int ice_aq_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_query_port_ets *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!pi) - 
return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.port_ets; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); cmd->port_teid = pi->root->info.node_teid; @@ -1537,18 +1533,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi, * * update the SW DB with the new TC changes */ -static enum ice_status +static int ice_update_port_tc_tree_cfg(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf) { struct ice_sched_node *node, *tc_node; struct ice_aqc_txsched_elem_data elem; - enum ice_status status = 0; u32 teid1, teid2; + int status = 0; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; /* suspend the missing TC nodes */ for (i = 0; i < pi->root->num_children; i++) { teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid); @@ -1605,12 +1601,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi, * query current port ETS configuration and update the * SW DB with the TC changes */ -enum ice_status +int ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_aq_query_port_ets(pi, buf, buf_size, cd); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 9b6f87a889a6..d73348f279f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -138,28 +138,27 @@ struct ice_cee_app_prio { } __packed; int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); -enum ice_status +int ice_get_dcb_cfg(struct ice_port_info *pi); +int ice_set_dcb_cfg(struct ice_port_info *pi); +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); +int ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cmd_details); #ifdef CONFIG_DCB -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd); -enum ice_status -ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); #else /* CONFIG_DCB */ -static inline enum ice_status +static inline int ice_aq_stop_lldp(struct ice_hw __always_unused *hw, bool __always_unused shutdown_lldp_agent, bool __always_unused persist, @@ -168,7 +167,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_lldp(struct ice_hw __always_unused *hw, bool __always_unused persist, struct ice_sq_cd __always_unused *cd) @@ -176,7 +175,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, bool __always_unused start_dcbx_agent, bool *dcbx_agent_status, @@ -187,7 +186,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused 
*hw, return 0; } -static inline enum ice_status +static inline int ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, bool __always_unused ena_mib) { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index a72e18320a22..b94d8daeaa58 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -528,7 +528,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) struct ice_aqc_port_ets_elem buf = { 0 }; struct device *dev = ice_pf_to_dev(pf); struct ice_dcbx_cfg *err_cfg; - enum ice_status ret; + int ret; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 7fdeb411b6df..3eb01731e496 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; + if (!bwcfg) + new_cfg->etscfg.tcbwtable[0] = 100; + if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) if (mode == pf->dcbx_cap) return ICE_DCB_NO_HW_CHG; - pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) { - if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) - return ICE_DCB_NO_HW_CHG; + + /* DSCP configuration is not DCBx negotiated */ + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; + + pf->dcbx_cap = mode; + + if (mode & DCB_CAP_DCBX_VER_CEE) qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - } else { + else qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; - } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 478412b28a76..737ee23f5a87 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -39,13 +39,13 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) /* We failed to locate the PBA, so just skip this entry */ - dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", - ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n", + status); } static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -251,7 +251,6 @@ static int ice_devlink_info_get(struct devlink *devlink, struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_info_ctx *ctx; - enum ice_status status; size_t i; int err; @@ -266,20 +265,19 @@ static int ice_devlink_info_get(struct devlink *devlink, return -ENOMEM; /* discover capabilities first */ - status = ice_discover_dev_caps(hw, &ctx->dev_caps); - if (status) { - dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_discover_dev_caps(hw, &ctx->dev_caps); + if (err) { + dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n", + 
err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities"); - err = -EIO; goto out_free_ctx; } if (ctx->dev_caps.common_cap.nvm_update_pending_orom) { - status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); - if (status) { - dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); + if (err) { + dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_orom = false; @@ -287,10 +285,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) { - status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); - if (status) { - dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); + if (err) { + dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_nvm = false; @@ -298,10 +296,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) { - status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); - if (status) { - dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); + if (err) { + dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_netlist = false; @@ -436,7 +434,7 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, { struct ice_pf *pf = devlink_priv(devlink); - ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2; + ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? 
true : false; return 0; } @@ -762,9 +760,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; void *nvm_data; u32 nvm_size; + int status; nvm_size = hw->flash.flash_size; nvm_data = vzalloc(nvm_size); @@ -777,7 +775,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, status, hw->adminq.sq_last_status); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); vfree(nvm_data); - return -EIO; + return status; } status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false); @@ -787,7 +785,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); ice_release_nvm(hw); vfree(nvm_data); - return -EIO; + return status; } ice_release_nvm(hw); @@ -819,8 +817,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; void *devcaps; + int status; devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); if (!devcaps) @@ -833,7 +831,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, status, hw->adminq.sq_last_status); NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities"); vfree(devcaps); - return -EIO; + return status; } *data = (u8 *)devcaps; diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index faea757fcf5d..fe006d9946f8 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -4,10 +4,6 @@ #ifndef _ICE_DEVLINK_H_ #define _ICE_DEVLINK_H_ -enum ice_devlink_param_id { - ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, -}; - struct ice_pf *ice_allocate_pf(struct device *dev); void ice_devlink_register(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 5af2faaa21e1..aefd9c3d450b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -270,9 +270,8 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; - int ret = 0; + int ret; u8 *buf; dev = ice_pf_to_dev(pf); @@ -285,22 +284,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, if (!buf) return -ENOMEM; - status = ice_acquire_nvm(hw, ICE_RES_READ); - if (status) { - dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_acquire_nvm(hw, ICE_RES_READ); + if (ret) { + dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } - status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, - false); - if (status) { - dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, + false); + if (ret) { + dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto release; } @@ -342,14 +337,14 @@ static bool ice_active_vfs(struct ice_pf *pf) static u64 ice_link_test(struct net_device *netdev) { struct 
ice_netdev_priv *np = netdev_priv(netdev); - enum ice_status status; bool link_up = false; + int status; netdev_info(netdev, "link test\n"); status = ice_get_link_status(np->vsi->port_info, &link_up); if (status) { - netdev_err(netdev, "link query error, status = %s\n", - ice_stat_str(status)); + netdev_err(netdev, "link query error, status = %d\n", + status); return 1; } @@ -1052,8 +1047,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; struct ice_port_info *pi; - enum ice_status status; - int err = 0; + int err; pi = vsi->port_info; @@ -1079,12 +1073,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - caps, NULL); - if (status) { - err = -EAGAIN; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + caps, NULL); + if (err) goto done; - } /* Set supported/configured FEC modes based on PHY capability */ if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC) @@ -1203,7 +1195,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { - enum ice_status status; + int status; /* Disable FW LLDP engine */ status = ice_cfg_lldp_mib_change(&pf->hw, false); @@ -1232,8 +1224,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED; pf->dcbx_cap |= DCB_CAP_DCBX_HOST; } else { - enum ice_status status; bool dcbx_agent_status; + int status; if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) { clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); @@ -1938,8 +1930,7 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - enum ice_status status; - int err = 0; + int err; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); @@ -1990,12 +1981,10 @@ ice_get_link_ksettings(struct net_device *netdev, if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); + if (err) goto done; - } /* Set the advertised flow control based on the PHY capability */ if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && @@ -2027,12 +2016,10 @@ ice_get_link_ksettings(struct net_device *netdev, caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); + if (err) goto done; - } /* Set supported FEC modes based on PHY capability */ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); @@ -2210,11 +2197,10 @@ ice_set_link_ksettings(struct net_device *netdev, struct ice_pf *pf = np->vsi->back; struct ice_port_info *pi; u8 autoneg_changed = 0; - enum ice_status status; u64 phy_type_high = 0; u64 phy_type_low = 0; - int err = 0; bool linkup; + int err; pi = np->vsi->port_info; @@ -2234,15 +2220,13 @@ ice_set_link_ksettings(struct net_device *netdev, /* Get the PHY capabilities based on media */ if 
(ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - phy_caps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + phy_caps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - phy_caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + phy_caps, NULL); + if (err) goto done; - } /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; @@ -2308,11 +2292,9 @@ ice_set_link_ksettings(struct net_device *netdev, /* Call to get the current link speed */ pi->phy.get_link_info = true; - status = ice_get_link_status(pi, &linkup); - if (status) { - err = -EIO; + err = ice_get_link_status(pi, &linkup); + if (err) goto done; - } curr_link_speed = pi->phy.link_info.link_speed; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); @@ -2381,10 +2363,9 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); - if (status) { + err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); + if (err) { netdev_info(netdev, "Set phy config failed,\n"); - err = -EIO; goto done; } @@ -2522,9 +2503,9 @@ static int ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; u64 hashed_flds; + int status; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2550,9 +2531,9 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs); if (status) { - dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); - return -EINVAL; + dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", + vsi->vsi_num, status); + return status; } return 0; @@ -2953,7 +2934,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_port_info *pi = np->vsi->port_info; struct ice_aqc_get_phy_caps_data *pcaps; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status status; + int status; /* Initialize pause params */ pause->rx_pause = 0; @@ -3003,11 +2984,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_vsi *vsi = np->vsi; struct ice_hw *hw = &pf->hw; struct ice_port_info *pi; - enum ice_status status; u8 aq_failures; bool link_up; - int err = 0; u32 is_an; + int err; pi = vsi->port_info; hw_link_info = &pi->phy.link_info; @@ -3033,11 +3013,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { kfree(pcaps); - return -EIO; + return err; } is_an = ice_is_phy_caps_an_enabled(pcaps) ? 
AUTONEG_ENABLE : @@ -3073,22 +3053,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EINVAL; /* Set the FC mode and only restart AN if link is up */ - status = ice_set_fc(pi, &aq_failures, link_up); + err = ice_set_fc(pi, &aq_failures, link_up); if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } @@ -3928,16 +3905,16 @@ ice_get_module_info(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 sff8472_comp = 0; u8 sff8472_swap = 0; u8 sff8636_rev = 0; u8 value = 0; + int status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00, 0, &value, 1, 0, NULL); if (status) - return -EIO; + return status; switch (value) { case ICE_MODULE_TYPE_SFP: @@ -3945,12 +3922,12 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_SFF_8472_COMP, 0x00, 0, &sff8472_comp, 1, 0, NULL); if (status) - return -EIO; + return status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, ICE_MODULE_SFF_8472_SWAP, 0x00, 0, &sff8472_swap, 1, 0, NULL); if (status) - return -EIO; + return status; if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) { modinfo->type = ETH_MODULE_SFF_8079; @@ -3970,7 +3947,7 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_REVISION_ADDR, 0x00, 0, &sff8636_rev, 1, 0, NULL); if (status) - return -EIO; + return status; /* Check revision compliance */ if (sff8636_rev > 0x02) { /* Module is SFF-8636 compliant */ @@ -4005,11 +3982,11 @@ ice_get_module_eeprom(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; bool is_sfp = false; unsigned int i, j; u16 offset = 0; u8 page = 0; + int status; if (!ee || !ee->len || !data) return -EINVAL; @@ -4017,7 +3994,7 @@ ice_get_module_eeprom(struct net_device *netdev, status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0, NULL); if (status) - return -EIO; + return status; if (value[0] == ICE_MODULE_TYPE_SFP) is_sfp = true; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 38960bcc384c..bbc64d6ce4cd 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -530,7 +530,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *hw_prof; struct ice_hw *hw = &pf->hw; - 
enum ice_status status; u64 entry1_h = 0; u64 entry2_h = 0; u64 prof_id; @@ -581,24 +580,20 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * actions (NULL) and zero actions 0. */ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - TNL_SEG_CNT(tun), &prof); - if (status) - return ice_status_to_errno(status); - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - main_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry1_h); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + TNL_SEG_CNT(tun), &prof); + if (err) + return err; + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + main_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); + if (err) goto err_prof; - } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry2_h); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); + if (err) goto err_entry; - } hw_prof->fdir_seg[tun] = seg; hw_prof->entry_h[0][tun] = entry1_h; @@ -1190,7 +1185,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, struct ice_hw *hw = &pf->hw; struct ice_fltr_desc desc; struct ice_vsi *ctrl_vsi; - enum ice_status status; u8 *pkt, *frag_pkt; bool has_frag; int err; @@ -1209,11 +1203,9 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, } ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - if (status) { - err = ice_status_to_errno(status); + err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + if (err) goto err_free_all; - } err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); if (err) goto err_free_all; @@ -1223,12 +1215,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, if (has_frag) { /* does not return error */ ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, - is_tun); - if (status) { - err = ice_status_to_errno(status); + err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, + is_tun); + if (err) goto err_frag; - } err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt); if (err) goto err_frag; @@ -1268,7 +1258,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool is_tun = tun == ICE_FD_HW_SEG_TUN; int err; - if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num)) + if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL)) continue; err = ice_fdir_write_fltr(pf, input, add, is_tun); if (err) @@ -1652,7 +1642,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) } /* return error if not an update and no available filters */ - fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1; + fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1; if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { dev_err(dev, "Failed to add filter. 
The maximum number of flow director filters has been reached.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index cbd8424631e3..ae089d32ee9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -712,7 +712,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, * @hw: pointer to the hardware structure * @cntr_id: returns counter index */ -enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -723,7 +723,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) * @hw: pointer to the hardware structure * @cntr_id: counter index to be freed */ -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -735,8 +735,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status -ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, @@ -749,8 +748,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status -ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, @@ -872,7 +870,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr) * @frag: generate a fragment packet * @tun: true implies generate a tunnel packet */ -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun) { @@ -919,15 +917,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, if (ice_fdir_pkt[idx].flow == flow) break; if (idx == ICE_FDIR_NUM_PKT) - return ICE_ERR_PARAM; + return -EINVAL; if (!tun) { memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); loc = pkt; } else { - if (!ice_get_open_tunnel_port(hw, &tnl_port)) - return ICE_ERR_DOES_NOT_EXIST; + if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL)) + return -ENOENT; if (!ice_fdir_pkt[idx].tun_pkt) - return ICE_ERR_PARAM; + return -EINVAL; memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, ice_fdir_pkt[idx].tun_pkt_len); ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET, @@ -1111,7 +1109,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } if (input->flex_fltr) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index da4163856f4c..b6c7c6903f35 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -201,16 +201,14 @@ struct ice_fdir_base_pkt { const u8 *tun_pkt; }; -enum ice_status 
ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); -enum ice_status -ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); -enum ice_status -ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); void ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, struct ice_fltr_desc *fdesc, bool add); -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun); int ice_get_fdir_cnt_all(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 23cfcceb1536..d29197ab3d02 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -314,6 +314,78 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, } /** + * ice_hw_ptype_ena - check if the PTYPE is enabled or not + * @hw: pointer to the HW structure + * @ptype: the hardware PTYPE + */ +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype) +{ + return ptype < ICE_FLOW_PTYPE_MAX && + test_bit(ptype, hw->hw_ptype); +} + +/** + * ice_marker_ptype_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the Marker PType TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual Marker PType TCAM entries. + */ +static void * +ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_marker_ptype_tcam_section *marker_ptype; + + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) + return NULL; + + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + marker_ptype = section; + if (index >= le16_to_cpu(marker_ptype->count)) + return NULL; + + return marker_ptype->tcam + index; +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void ice_fill_hw_ptype(struct ice_hw *hw) +{ + struct ice_marker_ptype_tcam_entry *tcam; + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + + bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); + if (!seg) + return; + + memset(&state, 0, sizeof(state)); + + do { + tcam = ice_pkg_enum_entry(seg, &state, + ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + ice_marker_ptype_tcam_handler); + if (tcam && + le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && + le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) + set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); + + seg = NULL; + } while (tcam); +} + +/** * ice_boost_tcam_handler * @sect_type: section type * @section: pointer to section @@ -358,7 +430,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) * if it is found. The ice_seg parameter must not be NULL since the first call * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. 
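The new ice_hw_ptype_ena()/ice_fill_hw_ptype() pair added above gives the driver a cheap bitmap test for whether the loaded DDP package enabled a given hardware PTYPE. A minimal caller sketch; example_collect_enabled_ptypes() is a hypothetical name, not part of this series:

	static void example_collect_enabled_ptypes(struct ice_hw *hw,
						   unsigned long *dst)
	{
		u16 ptype;

		/* hw->hw_ptype is filled from the Marker PType TCAM by
		 * ice_fill_hw_ptype() during package init
		 */
		for (ptype = 0; ptype < ICE_FLOW_PTYPE_MAX; ptype++)
			if (ice_hw_ptype_ena(hw, ptype))
				set_bit(ptype, dst);
	}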
*/ -static enum ice_status +static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, struct ice_boost_tcam_entry **entry) { @@ -368,7 +440,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, memset(&state, 0, sizeof(state)); if (!ice_seg) - return ICE_ERR_PARAM; + return -EINVAL; do { tcam = ice_pkg_enum_entry(ice_seg, &state, @@ -383,7 +455,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, } while (tcam); *entry = NULL; - return ICE_ERR_CFG; + return -EIO; } /** @@ -549,7 +621,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) * ------------------------------ * Result: key: b01 10 11 11 00 00 */ -static enum ice_status +static int ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, u8 *key_inv) { @@ -558,7 +630,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) - return ICE_ERR_CFG; + return -EIO; *key = 0; *key_inv = 0; @@ -651,7 +723,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ -static enum ice_status +static int ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, u16 len) { @@ -660,11 +732,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, /* size must be a multiple of 2 bytes. */ if (size % 2) - return ICE_ERR_CFG; + return -EIO; half_size = size / 2; if (off + len > half_size) - return ICE_ERR_CFG; + return -EIO; /* Make sure at most one bit is set in the never match mask. Having more * than one never match mask bit set will cause HW to consume excessive @@ -672,13 +744,13 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, */ #define ICE_NVR_MTCH_BITS_MAX 1 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) - return ICE_ERR_CFG; + return -EIO; for (i = 0; i < len; i++) if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, dc ? dc[i] : 0, nm ? nm[i] : 0, key + off + i, key + half_size + off + i)) - return ICE_ERR_CFG; + return -EIO; return 0; } @@ -692,25 +764,25 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, * or writing of the package. When attempting to obtain write access, the * caller must check for the following two return values: * - * ICE_SUCCESS - Means the caller has acquired the global config lock - * and can perform writing of the package. - * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. - */ -static enum ice_status + * 0 - Means the caller has acquired the global config lock + * and can perform writing of the package. + * -EALREADY - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. 
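ice_set_key() above enforces three invariants: the key size must be even (value half plus inverse half), off + len must stay within the first half, and at most one never-match bit may be set. A hedged usage sketch; example_build_key() is illustrative only and calls a function that is static to this file:

	static int example_build_key(u8 *key /* 16 B: 8 B key + 8 B inverse */)
	{
		u8 val[4] = { 0xAA, 0xBB, 0xCC, 0xDD };
		u8 dc[4]  = { 0x00, 0x00, 0x00, 0xFF };	/* last byte: don't care */

		/* upd == NULL: update all bits; nm == NULL: no never-match bits */
		return ice_set_key(key, 16, val, NULL, dc, NULL, 0, 4); /* 0 or -EIO */
	}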
+ */ +static int ice_acquire_global_cfg_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { - enum ice_status status; + int status; status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, ICE_GLOBAL_CFG_LOCK_TIMEOUT); if (!status) mutex_lock(&ice_global_cfg_lock_sw); - else if (status == ICE_ERR_AQ_NO_WORK) + else if (status == -EALREADY) ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); return status; @@ -735,7 +807,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw) * * This function will request ownership of the change lock. */ -enum ice_status +int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, @@ -765,14 +837,14 @@ void ice_release_change_lock(struct ice_hw *hw) * * Download Package (0x0C40) */ -static enum ice_status +static int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (error_offset) *error_offset = 0; @@ -787,7 +859,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; @@ -813,14 +885,14 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, * * Update Package (0x0C42) */ -static enum ice_status +static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (error_offset) *error_offset = 0; @@ -835,7 +907,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; @@ -892,11 +964,10 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, * * Obtains change lock and updates package. */ -static enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; u32 offset, info, i; + int status; status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) @@ -921,6 +992,22 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) return status; } +static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_LOAD_ERROR; + default: + return ICE_DDP_PKG_ERR; + } +} + /** * ice_dwnld_cfg_bufs * @hw: pointer to the hardware structure @@ -931,15 +1018,17 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * to the firmware. 
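The -EALREADY convention above replaces ICE_ERR_AQ_NO_WORK for the global config lock. Condensed into a sketch (example_locked_download() is hypothetical; the real logic lives in ice_dwnld_cfg_bufs() below):

	static enum ice_ddp_state example_locked_download(struct ice_hw *hw)
	{
		int status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);

		if (status == -EALREADY)	/* another PF wrote the package */
			return ICE_DDP_PKG_ALREADY_LOADED;
		if (status)
			return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);

		/* ... ice_aq_download_pkg() each buffer ... */

		ice_release_global_cfg_lock(hw);
		return ICE_DDP_PKG_SUCCESS;
	}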
Metadata buffers are skipped, and the first metadata buffer * found indicates that the rest of the buffers are all metadata buffers. */ -static enum ice_status +static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_buf_hdr *bh; + enum ice_aq_err err; u32 offset, info, i; + int status; if (!bufs || !count) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; /* If the first buffer's first section has its metadata bit set * then there are no buffers to be downloaded, and the operation is @@ -947,20 +1036,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) */ bh = (struct ice_buf_hdr *)bufs; if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) - return 0; - - /* reset pkg_dwnld_status in case this function is called in the - * reset/rebuild flow - */ - hw->pkg_dwnld_status = ICE_AQ_RC_OK; + return ICE_DDP_PKG_SUCCESS; status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); if (status) { - if (status == ICE_ERR_AQ_NO_WORK) - hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; - else - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - return status; + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); } for (i = 0; i < count; i++) { @@ -986,11 +1068,11 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) &offset, &info, NULL); /* Save AQ status from download package */ - hw->pkg_dwnld_status = hw->adminq.sq_last_status; if (status) { ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); - + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); break; } @@ -1000,7 +1082,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) ice_release_global_cfg_lock(hw); - return status; + return state; } /** @@ -1012,7 +1094,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * * Get Package Info List (0x0C43) */ -static enum ice_status +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, struct ice_aqc_get_pkg_info_resp *pkg_info, u16 buf_size, struct ice_sq_cd *cd) @@ -1031,7 +1113,7 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw, * * Handles the download of a complete package. */ -static enum ice_status +static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; @@ -1062,13 +1144,13 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) * * Saves off the package details into the HW structure. 
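One detail from ice_dwnld_cfg_bufs() above: a buffer whose first section carries ICE_METADATA_BUF marks the point where no further download work exists. The test in isolation, with example_is_metadata_buf() as a hypothetical name:

	static bool example_is_metadata_buf(const struct ice_buf_hdr *bh)
	{
		/* metadata buffers carry no downloadable sections */
		return le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF;
	}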
*/ -static enum ice_status +static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { struct ice_generic_seg_hdr *seg_hdr; if (!pkg_hdr) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); if (seg_hdr) { @@ -1082,7 +1164,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) ICE_SID_METADATA); if (!meta) { ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } hw->pkg_ver = meta->ver; @@ -1104,10 +1186,10 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) seg_hdr->seg_id); } else { ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1116,21 +1198,22 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) * * Store details of the package currently loaded in HW into the HW structure. */ -static enum ice_status ice_get_pkg_info(struct ice_hw *hw) +static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) { + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_aqc_get_pkg_info_resp *pkg_info; - enum ice_status status; u16 size; u32 i; size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); pkg_info = kzalloc(size, GFP_KERNEL); if (!pkg_info) - return ICE_ERR_NO_MEMORY; + return ICE_DDP_PKG_ERR; - status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); - if (status) + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; goto init_pkg_free_alloc; + } for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { #define ICE_PKG_FLAG_COUNT 4 @@ -1165,7 +1248,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw) init_pkg_free_alloc: kfree(pkg_info); - return status; + return state; } /** @@ -1176,28 +1259,28 @@ init_pkg_free_alloc: * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. 
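ice_verify_pkg() below leans on struct_size() for every length check. The pattern in isolation, with my_hdr as a made-up stand-in for the package header:

	struct my_hdr {
		__le32 count;
		__le32 offs[];			/* flexible array, count entries */
	};

	static bool example_hdr_fits(const struct my_hdr *h, u32 len)
	{
		/* fixed part plus at least one entry must fit ... */
		if (len < struct_size(h, offs, 1))
			return false;
		/* ... then re-check using the advertised entry count */
		return len >= struct_size(h, offs, le32_to_cpu(h->count));
	}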
*/ -static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; if (len < struct_size(pkg, seg_offset, 1)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; /* pkg must have at least one segment */ seg_count = le32_to_cpu(pkg->seg_count); if (seg_count < 1) - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; /* make sure segment array fits in package length */ if (len < struct_size(pkg, seg_offset, seg_count)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; /* all segments must fit within length */ for (i = 0; i < seg_count; i++) { @@ -1206,16 +1289,16 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) /* segment header must fit */ if (len < off + sizeof(*seg)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); /* segment body must fit */ if (len < off + le32_to_cpu(seg->seg_size)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; } - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1259,13 +1342,18 @@ static void ice_init_pkg_regs(struct ice_hw *hw) * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR * definitions. */ -static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) { - if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || - pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) - return ICE_ERR_NOT_SUPPORTED; + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1276,20 +1364,20 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) * * This function checks the package version compatibility with driver and NVM */ -static enum ice_status +static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, struct ice_seg **seg) { struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_status status; + enum ice_ddp_state state; u16 size; u32 i; /* Check package version compatibility */ - status = ice_chk_pkg_version(&hw->pkg_ver); - if (status) { + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return status; + return state; } /* find ICE segment in given package */ @@ -1297,18 +1385,19 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } /* Check if FW is compatible with the OS package */ size = struct_size(pkg, pkg_info, ICE_PKG_CNT); pkg = kzalloc(size, GFP_KERNEL); if (!pkg) - return ICE_ERR_NO_MEMORY; + return ICE_DDP_PKG_ERR; - status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); - if (status) + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { 
+ state = ICE_DDP_PKG_LOAD_ERROR; goto fw_ddp_compat_free_alloc; + } for (i = 0; i < le32_to_cpu(pkg->count); i++) { /* loop till we find the NVM package */ @@ -1318,7 +1407,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, pkg->pkg_info[i].ver.major || (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { - status = ICE_ERR_FW_DDP_MISMATCH; + state = ICE_DDP_PKG_FW_MISMATCH; ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ @@ -1326,7 +1415,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, } fw_ddp_compat_free_alloc: kfree(pkg); - return status; + return state; } /** @@ -1367,7 +1456,7 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) * and store the index number in struct ice_switch_info *switch_info * in HW for following use. */ -static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) +static int ice_get_prof_index_max(struct ice_hw *hw) { u16 prof_index = 0, j, max_prof_index = 0; struct ice_pkg_enum state; @@ -1379,7 +1468,7 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) memset(&state, 0, sizeof(state)); if (!hw->seg) - return ICE_ERR_PARAM; + return -EINVAL; ice_seg = hw->seg; @@ -1410,6 +1499,34 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) } /** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + */ +static enum ice_ddp_state +ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure * @buf: pointer to the package buffer @@ -1434,53 +1551,54 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this * case. 
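The reworked ice_chk_pkg_version() above now distinguishes a too-high package from a too-low one instead of collapsing both into a single not-supported error. The ordering reduces to a two-level lexicographic compare; the names below are illustrative, not driver code:

	/* <0: below the supported pair (maps to _FILE_VERSION_TOO_LOW),
	 * >0: above it (_FILE_VERSION_TOO_HIGH), 0: exact major.minor match
	 */
	static int example_ver_cmp(u8 maj, u8 min, u8 supp_maj, u8 supp_min)
	{
		if (maj != supp_maj)
			return maj < supp_maj ? -1 : 1;
		return min < supp_min ? -1 : min > supp_min ? 1 : 0;
	}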
*/ -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) { + bool already_loaded = false; + enum ice_ddp_state state; struct ice_pkg_hdr *pkg; - enum ice_status status; struct ice_seg *seg; if (!buf || !len) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; pkg = (struct ice_pkg_hdr *)buf; - status = ice_verify_pkg(pkg, len); - if (status) { + state = ice_verify_pkg(pkg, len); + if (state) { ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - status); - return status; + state); + return state; } /* initialize package info */ - status = ice_init_pkg_info(hw, pkg); - if (status) - return status; + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; /* before downloading the package, check package version for * compatibility with driver */ - status = ice_chk_pkg_compat(hw, pkg, &seg); - if (status) - return status; + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; /* initialize package hints and then download package */ ice_init_pkg_hints(hw, seg); - status = ice_download_pkg(hw, seg); - if (status == ICE_ERR_AQ_NO_WORK) { + state = ice_download_pkg(hw, seg); + if (state == ICE_DDP_PKG_ALREADY_LOADED) { ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - status = 0; + already_loaded = true; } /* Get information on the package currently loaded in HW, then make sure * the driver is compatible with this version. */ - if (!status) { - status = ice_get_pkg_info(hw); - if (!status) - status = ice_chk_pkg_version(&hw->active_pkg_ver); + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); } - if (!status) { + if (ice_is_init_pkg_successful(state)) { hw->seg = seg; /* on successful package download update other required * registers to support the package and fill HW tables @@ -1488,13 +1606,14 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) */ ice_init_pkg_regs(hw); ice_fill_blk_tbls(hw); + ice_fill_hw_ptype(hw); ice_get_prof_index_max(hw); } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - status); + state); } - return status; + return state; } /** @@ -1520,18 +1639,19 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) * package buffer, as the new copy will be managed by this function and * related routines. 
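Worth noting in ice_init_pkg() above: the code relies on ICE_DDP_PKG_SUCCESS being the zero value of enum ice_ddp_state (the "if (!state || state == ICE_DDP_PKG_ALREADY_LOADED)" test depends on it), so the familiar if (state) error idiom survives the move away from enum ice_status:

	enum ice_ddp_state state;

	state = ice_verify_pkg(pkg, len);
	if (state)		/* any nonzero state is a failure at this point */
		return state;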
*/ -enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) { - enum ice_status status; + enum ice_ddp_state state; u8 *buf_copy; if (!buf || !len) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); - status = ice_init_pkg(hw, buf_copy, len); - if (status) { + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { /* Free the copy, since we failed to initialize the package */ devm_kfree(ice_hw_to_dev(hw), buf_copy); } else { @@ -1540,7 +1660,23 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) hw->pkg_size = len; } - return status; + return state; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } } /** @@ -1644,7 +1780,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, * NOTE: The caller of the function is responsible for freeing the memory * allocated for every list entry. */ -enum ice_status +int ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, unsigned long *bm, struct list_head *fv_list) { @@ -1658,7 +1794,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, memset(&state, 0, sizeof(state)); if (!ids_cnt || !hw->seg) - return ICE_ERR_PARAM; + return -EINVAL; ice_seg = hw->seg; do { @@ -1702,7 +1838,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, } } while (fv); if (list_empty(fv_list)) - return ICE_ERR_CFG; + return -EIO; return 0; err: @@ -1711,7 +1847,7 @@ err: devm_kfree(ice_hw_to_dev(hw), fvl); } - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -1779,7 +1915,7 @@ static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) * result in some wasted space in the buffer. * Note: all package contents must be in Little Endian form. 
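ice_is_init_pkg_successful() above folds both already-loaded outcomes into success. A probe-time caller sketch (example_load_ddp() is hypothetical):

	static int example_load_ddp(struct ice_hw *hw, const u8 *buf, u32 len)
	{
		enum ice_ddp_state state = ice_copy_and_init_pkg(hw, buf, len);

		/* a same or compatible package already on the device is fine;
		 * on real failure the buffer copy was freed inside the call
		 */
		if (!ice_is_init_pkg_successful(state))
			return -EIO;	/* caller would fall back to safe mode */
		return 0;
	}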
*/ -static enum ice_status +static int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) { struct ice_buf_hdr *buf; @@ -1787,17 +1923,17 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) u16 data_end; if (!bld) - return ICE_ERR_PARAM; + return -EINVAL; buf = (struct ice_buf_hdr *)&bld->buf; /* already an active section, can't increase table size */ section_count = le16_to_cpu(buf->section_count); if (section_count > 0) - return ICE_ERR_CFG; + return -EIO; if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return ICE_ERR_CFG; + return -EIO; bld->reserved_section_table_entries += count; data_end = le16_to_cpu(buf->data_end) + @@ -1899,9 +2035,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @port: returns open port + * @type: type of tunnel, can be TNL_LAST if it doesn't matter */ bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type) { bool res = false; u16 i; @@ -1909,7 +2047,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) mutex_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) - if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) { + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port && + (type == TNL_LAST || type == hw->tnl.tbl[i].type)) { *port = hw->tnl.tbl[i].port; res = true; break; @@ -1956,19 +2095,19 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type, * creating a package buffer with the tunnel info and issuing an update package * command. */ -static enum ice_status +static int ice_create_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; mutex_lock(&hw->tnl_lock); bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_tunnel_end; } @@ -2027,26 +2166,26 @@ ice_create_tunnel_end: * targeting the specific updates requested and then performing an update * package. */ -static enum ice_status +static int ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; mutex_lock(&hw->tnl_lock); if (WARN_ON(!hw->tnl.tbl[index].valid || hw->tnl.tbl[index].type != type || hw->tnl.tbl[index].port != port)) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto ice_destroy_tunnel_end; } bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_destroy_tunnel_end; } @@ -2094,7 +2233,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; enum ice_tunnel_type tnl_type; - enum ice_status status; + int status; u16 index; tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
TNL_VXLAN : TNL_GENEVE; @@ -2102,8 +2241,8 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port)); if (status) { - netdev_err(netdev, "Error adding UDP tunnel - %s\n", - ice_stat_str(status)); + netdev_err(netdev, "Error adding UDP tunnel - %d\n", + status); return -EIO; } @@ -2118,15 +2257,15 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; enum ice_tunnel_type tnl_type; - enum ice_status status; + int status; tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE; status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type, ntohs(ti->port)); if (status) { - netdev_err(netdev, "Error removing UDP tunnel - %s\n", - ice_stat_str(status)); + netdev_err(netdev, "Error removing UDP tunnel - %d\n", + status); return -EIO; } @@ -2142,17 +2281,17 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, * @prot: variable to receive the protocol ID * @off: variable to receive the protocol offset */ -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off) { struct ice_fv_word *fv_ext; if (prof >= hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (fv_idx >= hw->blk[blk].es.fvw) - return ICE_ERR_PARAM; + return -EINVAL; fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); @@ -2175,11 +2314,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, * PTG ID that contains it through the PTG parameter, with the value of * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. */ -static enum ice_status +static int ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) { if (ptype >= ICE_XLT1_CNT || !ptg) - return ICE_ERR_PARAM; + return -EINVAL; *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; return 0; @@ -2209,21 +2348,21 @@ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) * This function will remove the ptype from the specific PTG, and move it to * the default PTG (ICE_DEFAULT_PTG). */ -static enum ice_status +static int ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { struct ice_ptg_ptype **ch; struct ice_ptg_ptype *p; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Should not happen if .in_use is set, bad config */ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) - return ICE_ERR_CFG; + return -EIO; /* find the ptype within this PTG, and bypass the link over it */ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; @@ -2256,17 +2395,17 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the * default PTG. 
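Earlier in this file, ice_get_open_tunnel_port() gained a tunnel-type argument (the fdir callers pass TNL_ALL). A sketch of a type-filtered lookup; example_pick_tunnel_port() is hypothetical:

	static bool example_pick_tunnel_port(struct ice_hw *hw, u16 *port)
	{
		/* prefer an open VXLAN port, else accept any open tunnel */
		if (ice_get_open_tunnel_port(hw, port, TNL_VXLAN))
			return true;
		return ice_get_open_tunnel_port(hw, port, TNL_ALL);
	}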
*/ -static enum ice_status +static int ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { - enum ice_status status; u8 original_ptg; + int status; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); if (status) @@ -2401,11 +2540,11 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) * This function will lookup the VSI entry in the XLT2 list and return * the VSI group its associated with. */ -static enum ice_status +static int ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) { if (!vsig || vsi >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; /* As long as there's a default or valid VSIG associated with the input * VSI, the functions returns a success. Any handling of VSIG will be @@ -2470,7 +2609,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) * for, the list must match exactly, including the order in which the * characteristics are listed. */ -static enum ice_status +static int ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, struct list_head *chs, u16 *vsig) { @@ -2484,7 +2623,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -2496,8 +2635,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, * The function will remove all VSIs associated with the input VSIG and move * them to the DEFAULT_VSIG and mark the VSIG available. */ -static enum ice_status -ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) +static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) { struct ice_vsig_prof *dtmp, *del; struct ice_vsig_vsi *vsi_cur; @@ -2505,10 +2643,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) idx = vsig & ICE_VSIG_IDX_M; if (idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; @@ -2557,7 +2695,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) * The function will remove the input VSI from its VSI group and move it * to the DEFAULT_VSIG. 
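On the VSIG lookups converted above: ice_vsig_find_vsi() succeeds for any in-range VSI and reports ICE_DEFAULT_VSIG for ungrouped ones, so callers inspect the output rather than the return code. Sketch (example_vsi_grouped() is hypothetical):

	static bool example_vsi_grouped(struct ice_hw *hw, u16 vsi)
	{
		u16 vsig;

		if (ice_vsig_find_vsi(hw, ICE_BLK_FD, vsi, &vsig))
			return false;	/* -EINVAL: vsi out of range */
		return vsig != ICE_DEFAULT_VSIG;
	}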
*/ -static enum ice_status +static int ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; @@ -2566,10 +2704,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* entry already in default VSIG, don't have to remove */ if (idx == ICE_DEFAULT_VSIG) @@ -2577,7 +2715,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; if (!(*vsi_head)) - return ICE_ERR_CFG; + return -EIO; vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; vsi_cur = (*vsi_head); @@ -2594,7 +2732,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) /* verify if VSI was removed from group list */ if (!vsi_cur) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; @@ -2615,24 +2753,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) * move the entry to the DEFAULT_VSIG, update the original VSIG and * then move entry to the new VSIG. */ -static enum ice_status +static int ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi *tmp; - enum ice_status status; u16 orig_vsig, idx; + int status; idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; /* if VSIG not in use and VSIG is not default type this VSIG * doesn't exist. */ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && vsig != ICE_DEFAULT_VSIG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (status) @@ -2738,7 +2876,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) * @masks: masks for FV * @prof_id: receives the profile ID */ -static enum ice_status +static int ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, struct ice_fv_word *fv, u16 *masks, u8 *prof_id) { @@ -2749,7 +2887,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, * field vector and mask. This will cause rule interference. */ if (blk == ICE_BLK_FD) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; @@ -2765,7 +2903,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -2818,14 +2956,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) * This function allocates a new entry in a Profile ID TCAM for a specific * block. */ -static enum ice_status +static int ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } @@ -2838,13 +2976,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, * * This function frees an entry in a Profile ID TCAM for a specific block. 
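The TCAM entry allocator pair above now returns -EINVAL when the block has no TCAM resource type. Paired usage in sketch form (example_with_tcam_ent() is hypothetical):

	static int example_with_tcam_ent(struct ice_hw *hw)
	{
		u16 idx;
		int err;

		err = ice_alloc_tcam_ent(hw, ICE_BLK_FD, true /* bottom up */, &idx);
		if (err)
			return err;

		/* ... program the profile ID TCAM entry at idx ... */

		return ice_free_tcam_ent(hw, ICE_BLK_FD, idx);
	}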
*/ -static enum ice_status +static int ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tcam_idx); } @@ -2858,15 +2996,14 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) * This function allocates a new profile ID, which also corresponds to a Field * Vector (Extraction Sequence) entry. */ -static enum ice_status -ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) +static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) { - enum ice_status status; u16 res_type; u16 get_prof; + int status; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); if (!status) @@ -2883,14 +3020,13 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) * * This function frees a profile ID, which also corresponds to a Field Vector. */ -static enum ice_status -ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { u16 tmp_prof_id = (u16)prof_id; u16 res_type; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); } @@ -2901,11 +3037,10 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to increment the reference count */ -static enum ice_status -ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; hw->blk[blk].es.ref_count[prof_id]++; @@ -3022,17 +3157,17 @@ static void ice_init_all_prof_masks(struct ice_hw *hw) * @mask: the 16-bit mask * @mask_idx: variable to receive the mask index */ -static enum ice_status +static int ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, u16 *mask_idx) { bool found_unused = false, found_copy = false; - enum ice_status status = ICE_ERR_MAX_LIMIT; u16 unused_idx = 0, copy_idx = 0; + int status = -ENOSPC; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->blk[blk].masks.lock); @@ -3090,15 +3225,15 @@ err_ice_alloc_prof_mask: * @blk: hardware block * @mask_idx: index of mask */ -static enum ice_status +static int ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) { if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; if (!(mask_idx >= hw->blk[blk].masks.first && mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; mutex_lock(&hw->blk[blk].masks.lock); @@ -3132,14 +3267,14 @@ exit_ice_free_prof_mask: * @blk: hardware block * @prof_id: profile ID */ -static enum ice_status +static int ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) { u32 mask_bm; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mask_bm = hw->blk[blk].es.mask_ena[prof_id]; for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) @@ -3194,7 +3329,7 @@ static void ice_shutdown_all_prof_masks(struct ice_hw *hw) * @prof_id: profile ID * @masks: masks */ -static 
enum ice_status +static int ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, u16 *masks) { @@ -3224,7 +3359,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, if (ena_mask & BIT(i)) ice_free_prof_mask(hw, blk, i); - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } /* enable the masks for this profile */ @@ -3266,11 +3401,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to decrement the reference count */ -static enum ice_status +static int ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { @@ -3708,7 +3843,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) * ice_init_hw_tbls - init hardware table memory * @hw: pointer to the hardware structure */ -enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +int ice_init_hw_tbls(struct ice_hw *hw) { u8 i; @@ -3827,7 +3962,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) err: ice_free_hw_tbls(hw); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -3843,7 +3978,7 @@ err: * @nm_msk: never match mask * @key: output of profile ID key */ -static enum ice_status +static int ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], @@ -3899,7 +4034,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, * @dc_msk: don't care mask * @nm_msk: never match mask */ -static enum ice_status +static int ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], @@ -3907,7 +4042,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) { struct ice_prof_tcam_entry; - enum ice_status status; + int status; status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); @@ -3926,7 +4061,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, * @vsig: VSIG to query * @refs: pointer to variable to receive the reference count */ -static enum ice_status +static int ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) { u16 idx = vsig & ICE_VSIG_IDX_M; @@ -3935,7 +4070,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) *refs = 0; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; while (ptr) { @@ -3976,7 +4111,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -3996,7 +4131,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, sizeof(p->es[0])); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->prof_id); @@ -4014,7 +4149,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware 
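The ice_prof_bld_* builders converted here all feed ice_upd_prof_hw(), which appears later in this hunk. For orientation, a condensed sketch of that build-then-commit flow (simplified; error unwinding trimmed, and ice_pkg_buf_free() is assumed from elsewhere in ice_flex_pipe.c):

	struct ice_buf_build *b = ice_pkg_buf_alloc(hw);
	int status;

	if (!b)
		return -ENOMEM;

	status = ice_pkg_buf_reserve_section(b, sects);	/* room for all sections */
	if (status)
		goto error_tmp;

	/* ice_prof_bld_es/tcam/xlt1/xlt2() fill the reserved sections */

	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);	/* commit to HW */
	if (status == -EIO)
		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");

error_tmp:
	ice_pkg_buf_free(hw, b);
	return status;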
*/ -static enum ice_status +static int ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4030,7 +4165,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct_size(p, entry, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); @@ -4050,7 +4185,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4066,7 +4201,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->ptype); @@ -4082,7 +4217,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4101,7 +4236,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->vsi); @@ -4121,18 +4256,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, * @blk: hardware block * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, struct list_head *chgs) { struct ice_buf_build *b; struct ice_chs_chg *tmp; - enum ice_status status; u16 pkg_sects; u16 xlt1 = 0; u16 xlt2 = 0; u16 tcam = 0; u16 es = 0; + int status; u16 sects; /* count number of sections we need */ @@ -4164,7 +4299,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, /* Build update package buffer */ b = ice_pkg_buf_alloc(hw); if (!b) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_pkg_buf_reserve_section(b, sects); if (status) @@ -4201,13 +4336,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, */ pkg_sects = ice_pkg_buf_get_active_sections(b); if (!pkg_sects || pkg_sects != sects) { - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto error_tmp; } /* update package */ status = ice_update_pkg(hw, ice_pkg_buf(b), 1); - if (status == ICE_ERR_AQ_ERROR) + if (status == -EIO) ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); error_tmp: @@ -4273,7 +4408,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { * @prof_id: profile ID * @es: extraction sequence (length of array is determined by the block) */ -static enum ice_status +static int ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) { DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); @@ -4328,7 +4463,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) /* check for room */ if (first_free + 1 < (s8)ice_fd_pairs[index].count) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* place in extraction sequence */ for (k = 0; k < ice_fd_pairs[index].count; k++) { @@ -4338,7 +4473,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) ice_fd_pairs[index].off + (k * 2); if (k > first_free) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* keep track of non-relevant fields */ mask_sel |= 
BIT(first_free - k); @@ -4449,7 +4584,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, * @attr: array of attributes that will be considered * @attr_cnt: number of elements in the attribute array */ -static enum ice_status +static int ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, const struct ice_ptype_attributes *attr, u16 attr_cnt) { @@ -4465,11 +4600,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, &prof->attr[prof->ptg_cnt]); if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } if (!found) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; return 0; } @@ -4490,7 +4625,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * it will not be written until the first call to ice_add_flow that specifies * the ID value used here. */ -enum ice_status +int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, struct ice_fv_word *es, u16 *masks) @@ -4498,9 +4633,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; - enum ice_status status; u8 byte = 0; u8 prof_id; + int status; bitmap_zero(ptgs_used, ICE_XLT1_CNT); @@ -4538,7 +4673,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], /* add profile info */ prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL); if (!prof) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof; } @@ -4581,7 +4716,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], */ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt); - if (status == ICE_ERR_MAX_LIMIT) + if (status == -ENOSPC) break; if (status) { /* This is simple a PTYPE/PTG with no @@ -4658,14 +4793,13 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) * @blk: hardware block * @idx: the index to release */ -static enum ice_status -ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) +static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) { /* Masks to invoke a never match entry */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status; + int status; /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, @@ -4685,11 +4819,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) * @blk: hardware block * @prof: pointer to profile structure to remove */ -static enum ice_status +static int ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_vsig_prof *prof) { - enum ice_status status; + int status; u16 i; for (i = 0; i < prof->tcam_count; i++) @@ -4698,7 +4832,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx); if (status) - return ICE_ERR_HW_TABLE; + return -EIO; } return 0; @@ -4711,14 +4845,14 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, * @vsig: the VSIG to remove * @chg: the change list */ -static enum ice_status +static int ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - enum 
ice_status status; + int status; /* remove TCAM entries */ list_for_each_entry_safe(d, t, @@ -4745,7 +4879,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; p->type = ICE_VSIG_REM; p->orig_vsig = vsig; @@ -4768,13 +4902,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @hdl: profile handle indicating which profile to remove * @chg: list to receive a record of changes */ -static enum ice_status +static int ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - enum ice_status status; + int status; list_for_each_entry_safe(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, @@ -4792,7 +4926,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, return status; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -4801,12 +4935,11 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, * @blk: hardware block * @id: profile tracking ID */ -static enum ice_status -ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) +static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_chs_chg *del, *tmp; - enum ice_status status; struct list_head chg; + int status; u16 i; INIT_LIST_HEAD(&chg); @@ -4842,16 +4975,16 @@ err_ice_rem_flow_all: * previously created through ice_add_prof. If any existing entries * are associated with this profile, they will be removed as well. */ -enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *pmap; - enum ice_status status; + int status; mutex_lock(&hw->blk[blk].es.prof_map_lock); pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_rem_prof; } @@ -4878,20 +5011,20 @@ err_ice_rem_prof: * @hdl: profile handle * @chg: change list */ -static enum ice_status +static int ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct list_head *chg) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_chs_chg *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_get_prof; } @@ -4901,7 +5034,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_get_prof; } @@ -4933,7 +5066,7 @@ err_ice_get_prof: * * This routine makes a copy of the list of profiles in the specified VSIG. 
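ice_get_profs_vsig() and its callers rely on a copy-then-unwind list idiom backed by devm-managed memory. A standalone sketch of that pattern (hypothetical helper name; the list member is named "list" per ice_flex_type.h):

	#include <linux/device.h>
	#include <linux/list.h>

	static int copy_prof_list(struct device *dev, struct list_head *src,
				  struct list_head *dst)
	{
		struct ice_vsig_prof *ent, *tmp;

		list_for_each_entry(ent, src, list) {
			struct ice_vsig_prof *p;

			p = devm_kmemdup(dev, ent, sizeof(*p), GFP_KERNEL);
			if (!p)
				goto err;
			list_add_tail(&p->list, dst);
		}
		return 0;
	err:
		/* free whatever was copied before the allocation failure */
		list_for_each_entry_safe(ent, tmp, dst, list) {
			list_del(&ent->list);
			devm_kfree(dev, ent);
		}
		return -ENOMEM;
	}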
*/ -static enum ice_status +static int ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *lst) { @@ -4961,7 +5094,7 @@ err_ice_get_profs_vsig: devm_kfree(ice_hw_to_dev(hw), ent1); } - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -4971,25 +5104,25 @@ err_ice_get_profs_vsig: * @lst: the list to be added to * @hdl: profile handle of entry to add */ -static enum ice_status +static int ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct list_head *lst, u64 hdl) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_to_lst; } p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_to_lst; } @@ -5018,17 +5151,17 @@ err_ice_add_prof_to_lst: * @vsig: the VSIG to move the VSI to * @chg: the change list */ -static enum ice_status +static int ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 orig_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (!status) @@ -5078,13 +5211,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg) * * This function appends an enable or disable TCAM entry in the change log */ -static enum ice_status +static int ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, u16 vsig, struct ice_tcam_inf *tcam, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; + int status; u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; @@ -5117,7 +5250,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, tcam->ptg, vsig, 0, tcam->attr.flags, @@ -5151,13 +5284,13 @@ err_ice_prof_tcam_ena_dis: * @vsig: the VSIG for which to adjust profile priorities * @chg: the change list */ -static enum ice_status +static int ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_vsig_prof *t; - enum ice_status status; + int status; u16 idx; bitmap_zero(ptgs_used, ICE_XLT1_CNT); @@ -5222,7 +5355,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @rev: true to add entries to the end of the list * @chg: the change list */ -static enum ice_status +static int ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, bool rev, struct list_head *chg) { @@ -5230,26 +5363,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; + int status = 0; /* 
Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; /* new VSIG profile structure */ t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL); if (!t) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_id_vsig; } @@ -5264,7 +5397,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_id_vsig; } @@ -5334,21 +5467,21 @@ err_ice_add_prof_id_vsig: * @hdl: the profile handle of the profile that will be added to the VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 new_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; new_vsig = ice_vsig_alloc(hw, blk); if (!new_vsig) { - status = ICE_ERR_HW_TABLE; + status = -EIO; goto err_ice_create_prof_id_vsig; } @@ -5384,18 +5517,18 @@ err_ice_create_prof_id_vsig: * @new_vsig: return of new VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, struct list_head *lst, u16 *new_vsig, struct list_head *chg) { struct ice_vsig_prof *t; - enum ice_status status; + int status; u16 vsig; vsig = ice_vsig_alloc(hw, blk); if (!vsig) - return ICE_ERR_HW_TABLE; + return -EIO; status = ice_move_vsi(hw, blk, vsi, vsig, chg); if (status) @@ -5425,8 +5558,8 @@ static bool ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) { struct ice_vsig_prof *t; - enum ice_status status; struct list_head lst; + int status; INIT_LIST_HEAD(&lst); @@ -5456,14 +5589,14 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be enabled. 
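One practical benefit of the errno conversion is that callers can special-case standard codes without a translation layer. A hypothetical caller of the profile APIs converted in this file (locals invented for illustration; signatures per ice_flex_pipe.h):

	int err;

	err = ice_add_prof(hw, ICE_BLK_RSS, prof_id, ptypes, NULL, 0, es, masks);
	if (err)
		return err;

	err = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, prof_id);
	if (err == -EEXIST)	/* VSI already runs this profile */
		err = 0;
	return err;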
*/ -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head union_lst; - enum ice_status status; struct list_head chg; + int status; u16 vsig; INIT_LIST_HEAD(&union_lst); @@ -5489,7 +5622,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) * scenario */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto err_ice_add_prof_id_flow; } @@ -5597,7 +5730,7 @@ err_ice_add_prof_id_flow: * @lst: list to remove the profile from * @hdl: the profile handle indicating the profile to remove */ -static enum ice_status +static int ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) { struct ice_vsig_prof *ent, *tmp; @@ -5609,7 +5742,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -5623,13 +5756,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be disabled. */ -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head chg, copy; - enum ice_status status; + int status; u16 vsig; INIT_LIST_HEAD(©); @@ -5724,7 +5857,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) } } } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } /* update hardware tables */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 344c2637facd..6cbc29bcb02f 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -18,10 +18,67 @@ #define ICE_PKG_CNT 4 -enum ice_status +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* The signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, + + /* An error occurred in firmware while loading 
the DDP package */ + ICE_DDP_PKG_LOAD_ERROR = -11, + + /* Other errors */ + ICE_DDP_PKG_ERR = -12 +}; + +int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off); void @@ -29,32 +86,37 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, unsigned long *bm); void ice_init_prof_result_bm(struct ice_hw *hw); -enum ice_status +int ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, unsigned long *bm, struct list_head *fv_list); bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port); +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type); int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); -enum ice_status +/* Rx parser PTYPE functions */ +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); + +/* XLT2/VSI group functions */ +int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, struct ice_fv_word *es, u16 *masks); -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); -enum ice_status +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); -enum ice_status ice_init_hw_tbls(struct ice_hw *hw); +bool ice_is_init_pkg_successful(enum ice_ddp_state state); +int ice_init_hw_tbls(struct ice_hw *hw); void ice_free_seg(struct ice_hw *hw); void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); -enum ice_status -ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 0f572a36d021..fc087e0b5292 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -160,6 +160,7 @@ struct ice_meta_sect { #define ICE_SID_CDID_KEY_BUILDER_RSS 47 #define ICE_SID_CDID_REDIR_RSS 48 +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 #define ICE_SID_RXPARSER_BOOST_TCAM 56 #define ICE_SID_TXPARSER_BOOST_TCAM 66 @@ -201,6 +202,24 @@ enum ice_sect { ICE_SECT_COUNT }; +/* Packet Type (PTYPE) values */ +#define ICE_PTYPE_MAC_PAY 1 +#define ICE_PTYPE_IPV4_PAY 23 +#define ICE_PTYPE_IPV4_UDP_PAY 24 +#define ICE_PTYPE_IPV4_TCP_PAY 26 +#define ICE_PTYPE_IPV4_SCTP_PAY 27 +#define ICE_PTYPE_IPV6_PAY 89 +#define ICE_PTYPE_IPV6_UDP_PAY 90 +#define ICE_PTYPE_IPV6_TCP_PAY 92 +#define ICE_PTYPE_IPV6_SCTP_PAY 93 +#define ICE_MAC_IPV4_ESP 160 +#define ICE_MAC_IPV6_ESP 161 +#define ICE_MAC_IPV4_AH 162 +#define ICE_MAC_IPV6_AH 163 +#define ICE_MAC_IPV4_NAT_T_ESP 164 +#define ICE_MAC_IPV6_NAT_T_ESP 165 +#define ICE_MAC_IPV4_GTPU 329 +#define ICE_MAC_IPV6_GTPU 330 #define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331 #define ICE_MAC_IPV4_GTPU_IPV4_PAY 332 #define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 
333 @@ -221,6 +240,10 @@ enum ice_sect { #define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348 #define ICE_MAC_IPV6_GTPU_IPV6_TCP 349 #define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350 +#define ICE_MAC_IPV4_PFCP_SESSION 352 +#define ICE_MAC_IPV6_PFCP_SESSION 354 +#define ICE_MAC_IPV4_L2TPV3 360 +#define ICE_MAC_IPV6_L2TPV3 361 /* Attributes that can modify PTYPE definitions. * @@ -329,6 +352,25 @@ struct ice_boost_tcam_section { sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) +/* package Marker Ptype TCAM entry */ +struct ice_marker_ptype_tcam_entry { +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 + __le16 addr; + __le16 ptype; + u8 keys[20]; +}; + +struct ice_marker_ptype_tcam_section { + __le16 count; + __le16 reserved; + struct ice_marker_ptype_tcam_entry tcam[]; +}; + +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ + sizeof(struct ice_marker_ptype_tcam_entry), \ + sizeof(struct ice_marker_ptype_tcam_entry)) + struct ice_xlt1_section { __le16 count; __le16 offset; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index f160672448a0..2c5332953679 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -609,8 +609,6 @@ struct ice_flow_prof_params { ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ ICE_FLOW_SEG_HDR_NAT_T_ESP) -#define ICE_FLOW_SEG_HDRS_L2_MASK \ - (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) #define ICE_FLOW_SEG_HDRS_L3_MASK \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP) #define ICE_FLOW_SEG_HDRS_L4_MASK \ @@ -625,8 +623,7 @@ struct ice_flow_prof_params { * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ -static enum ice_status -ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) +static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) { u8 i; @@ -634,12 +631,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) /* Multiple L3 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; /* Multiple L4 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; } return 0; @@ -700,8 +697,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) * This function identifies the packet types associated with the protocol * headers being present in packet segments of the specified flow profile. */ -static enum ice_status -ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) +static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) { struct ice_flow_prof *prof; u8 i; @@ -898,7 +894,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) * field. It then allocates one or more extraction sequence entries for the * given field, and fill the entries with protocol ID and offset information. 
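Each extraction-sequence entry pulls one 2-byte word out of the packet, so a field of size bytes needs DIV_ROUND_UP(size, ICE_FLOW_FV_EXTRACT_SZ) entries. A simplified sketch of the fill loop this comment describes (illustrative locals; the real function also handles reversed field-vector layouts):

	u16 ese = DIV_ROUND_UP(size, ICE_FLOW_FV_EXTRACT_SZ);	/* words needed */
	u16 i;

	for (i = 0; i < ese; i++) {
		if (params->es_cnt >= fv_words)
			return -ENOSPC;	/* exceeds the block's capability */

		params->es[params->es_cnt].prot_id = prot_id;
		params->es[params->es_cnt].off =
			off + i * ICE_FLOW_FV_EXTRACT_SZ;
		params->es_cnt++;
	}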
*/ -static enum ice_status +static int ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg, enum ice_flow_field fld, u64 match) { @@ -1035,7 +1031,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, prot_id = ICE_PROT_GRE_OF; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } /* Each extraction sequence entry is a word in size, and extracts a @@ -1073,7 +1069,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * does not exceed the block's capability */ if (params->es_cnt >= fv_words) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1099,7 +1095,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * @params: information about the flow to be processed * @seg: index of packet segment whose raw fields are to be extracted */ -static enum ice_status +static int ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg) { @@ -1112,12 +1108,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, if (params->prof->segs[seg].raws_cnt > ARRAY_SIZE(params->prof->segs[seg].raws)) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* Offsets within the segment headers are not supported */ hdrs_sz = ice_flow_calc_seg_sz(params, seg); if (!hdrs_sz) - return ICE_ERR_PARAM; + return -EINVAL; fv_words = hw->blk[params->blk].es.fvw; @@ -1150,7 +1146,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, */ if (params->es_cnt >= hw->blk[params->blk].es.count || params->es_cnt >= ICE_MAX_FV_WORDS) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1176,12 +1172,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, * This function iterates through all matched fields in the given segments, and * creates an extraction sequence for the fields. 
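That iteration is a per-segment bitmap walk over the matched fields. Condensed to its shape (simplified from ice_flow_create_xtrct_seq() as converted below):

	for (i = 0; i < prof->segs_cnt; i++) {
		u64 match = prof->segs[i].match;
		enum ice_flow_field j;

		for_each_set_bit(j, (unsigned long *)&match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i, j, match);
			if (status)
				return status;
			clear_bit(j, (unsigned long *)&match);
		}
	}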
*/ -static enum ice_status +static int ice_flow_create_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof_params *params) { struct ice_flow_prof *prof = params->prof; - enum ice_status status = 0; + int status = 0; u8 i; for (i = 0; i < prof->segs_cnt; i++) { @@ -1210,10 +1206,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw, * @hw: pointer to the HW struct * @params: information about the flow to be processed */ -static enum ice_status +static int ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) { - enum ice_status status; + int status; status = ice_flow_proc_seg_hdrs(params); if (status) @@ -1229,7 +1225,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) status = 0; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } return status; @@ -1329,12 +1325,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) * @blk: classification stage * @entry: flow entry to be removed */ -static enum ice_status +static int ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, struct ice_flow_entry *entry) { if (!entry) - return ICE_ERR_BAD_PTR; + return -EINVAL; list_del(&entry->l_entry); @@ -1355,27 +1351,27 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; - enum ice_status status; + int status; u8 i; if (!prof) - return ICE_ERR_BAD_PTR; + return -EINVAL; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof), GFP_KERNEL); if (!params->prof) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto free_params; } @@ -1432,11 +1428,11 @@ free_params: * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof) { - enum ice_status status; + int status; /* Remove all remaining flow entries before removing the flow profile */ if (!list_empty(&prof->entries)) { @@ -1474,11 +1470,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (!test_bit(vsi_handle, prof->vsis)) { status = ice_add_prof_id_flow(hw, blk, @@ -1505,11 +1501,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (test_bit(vsi_handle, prof->vsis)) { status = ice_rem_prof_id_flow(hw, blk, @@ -1536,21 +1532,21 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @segs_cnt: number of packet segments provided * @prof: stores the returned flow profile added */ -enum ice_status +int 
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { - enum ice_status status; + int status; if (segs_cnt > ICE_FLOW_SEG_MAX) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; if (!segs_cnt) - return ICE_ERR_PARAM; + return -EINVAL; if (!segs) - return ICE_ERR_BAD_PTR; + return -EINVAL; status = ice_flow_val_hdrs(segs, segs_cnt); if (status) @@ -1574,17 +1570,16 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, * @blk: the block for which the flow profile is to be removed * @prof_id: unique ID of the flow profile to be removed */ -enum ice_status -ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *prof; - enum ice_status status; + int status; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -1608,34 +1603,34 @@ out: * @data: pointer to a data buffer containing flow entry's match values/masks * @entry_h: pointer to buffer that receives the new flow entry's handle */ -enum ice_status +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, void *data, u64 *entry_h) { struct ice_flow_entry *e = NULL; struct ice_flow_prof *prof; - enum ice_status status; + int status; /* No flow entry data is expected for RSS */ if (!entry_h || (!data && blk != ICE_BLK_RSS)) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } else { /* Allocate memory for the entry being added and associate * the VSI to the found flow profile */ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL); if (!e) - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; else status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); } @@ -1654,7 +1649,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, case ICE_BLK_RSS: break; default: - status = ICE_ERR_NOT_IMPL; + status = -EOPNOTSUPP; goto out; } @@ -1680,15 +1675,14 @@ out: * @blk: classification stage * @entry_h: handle to the flow entry to be removed */ -enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, - u64 entry_h) +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h) { struct ice_flow_entry *entry; struct ice_flow_prof *prof; - enum ice_status status = 0; + int status = 0; if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL) - return ICE_ERR_PARAM; + return -EINVAL; entry = ICE_FLOW_ENTRY_PTR(entry_h); @@ -1836,7 +1830,7 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, * header value to set flow field segment for further use in flow * profile entry or removal. 
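Together with ice_flow_set_fld() further down, the converted API can be exercised as in this hypothetical snippet (identifiers from ice_flow.h; prof_id and the hw pointer invented):

	struct ice_flow_seg_info segs[1] = {};
	struct ice_flow_prof *prof;
	int err;

	/* hash on the IPv4 source address of the outer header */
	ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_SA,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, false);

	err = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, prof_id,
				segs, 1, &prof);
	if (err)	/* -EINVAL, -ENOSPC, -ENOMEM, ... */
		return err;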
*/ -static enum ice_status +static int ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, u32 flow_hdr) { @@ -1853,15 +1847,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) - return ICE_ERR_PARAM; + return -EINVAL; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; return 0; } @@ -1899,14 +1893,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) * the VSI from that profile. If the flow profile has no VSIs it will * be removed. */ -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *p, *t; - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (list_empty(&hw->fl_profs[blk])) return 0; @@ -1966,7 +1960,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { struct ice_rss_cfg *r, *rss_cfg; @@ -1981,7 +1975,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg), GFP_KERNEL); if (!rss_cfg) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; @@ -2022,21 +2016,21 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs, u8 segs_cnt) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; - enum ice_status status; + int status; if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) - return ICE_ERR_PARAM; + return -EINVAL; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, @@ -2128,15 +2122,15 @@ exit: * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. 
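A short hypothetical caller of the converted RSS add path (the hash and header masks are the ICE_HASH_* and ICE_FLOW_SEG_HDR_* definitions from ice_flow.h):

	/* hash IPv4 TCP flows on addresses plus ports for this VSI */
	int err = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
				  ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
	if (err)
		dev_dbg(ice_hw_to_dev(hw), "RSS cfg failed, err %d\n", err);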
*/ -enum ice_status +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs) { - enum ice_status status; + int status; if (hashed_flds == ICE_HASH_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, @@ -2159,18 +2153,18 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs, u8 segs_cnt) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; - enum ice_status status; + int status; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, @@ -2182,7 +2176,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -2216,15 +2210,15 @@ out: * removed. Calls are made to underlying flow APIs which will in turn build or update buffers for RSS XLT1 section. */ -enum ice_status __maybe_unused +int __maybe_unused ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs) { - enum ice_status status; + int status; if (hashed_flds == ICE_HASH_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, @@ -2279,20 +2273,19 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * message, convert it to ICE-compatible values, and configure RSS flow * profiles.
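ice_add_avf_rss_cfg() below peels AVF hash bits off one at a time and maps each to an ICE hash configuration. One such mapping, sketched (simplified; the function covers many more field combinations, and the ICE_FLOW_SEG_HDR_NONE argument matches the call visible in the hunk):

	if (avf_hash & BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
		/* AVF's IPv4+TCP request maps to ICE's TCP-over-IPv4 hash */
		err = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
				      ICE_FLOW_SEG_HDR_NONE);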
*/ -enum ice_status -ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) { - enum ice_status status = 0; + int status = 0; u64 hash_flds; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Make sure no unsupported bits are specified */ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS | ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)) - return ICE_ERR_CFG; + return -EIO; hash_flds = avf_hash; @@ -2352,7 +2345,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) } if (rss_hash == ICE_HASH_INVALID) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; status = ice_add_rss_cfg(hw, vsi_handle, rss_hash, ICE_FLOW_SEG_HDR_NONE); @@ -2368,13 +2361,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle */ -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { - enum ice_status status = 0; struct ice_rss_cfg *r; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) { diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 2a2d8c1536cb..d8782b28323e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -383,18 +383,16 @@ struct ice_rss_cfg { u32 packet_hdr; }; -enum ice_status +int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof); -enum ice_status -ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); -enum ice_status +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi, enum ice_flow_priority prio, void *data, u64 *entry_h); -enum ice_status -ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); void ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 mask_loc, u16 last_loc, bool range); @@ -402,14 +400,13 @@ void ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, u16 val_loc, u16 mask_loc); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status -ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs); -enum ice_status +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index c2e78eaf4ccb..94903b450345 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ 
b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -47,12 +47,69 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info, } /** + * ice_fltr_set_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi: the VSI being configured + * @promisc_mask: mask of promiscuous config bits + * + * Set VSI with all associated VLANs to given promiscuous mode(s) + */ +int +ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask) +{ + return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); +} + +/** + * ice_fltr_clear_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi: the VSI being configured + * @promisc_mask: mask of promiscuous config bits + * + * Clear VSI with all associated VLANs to given promiscuous mode(s) + */ +int +ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask) +{ + return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); +} + +/** + * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to clear mode + * @promisc_mask: mask of promiscuous config bits to clear + * @vid: VLAN ID to clear VLAN promiscuous + */ +int +ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); +} + +/** + * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @vid: VLAN ID to set VLAN promiscuous + */ +int +ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); +} + +/** * ice_fltr_add_mac_list - add list of MAC filters * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_mac(&vsi->back->hw, list); } @@ -62,8 +119,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) +int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_mac(&vsi->back->hw, list); } @@ -73,8 +129,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_vlan(&vsi->back->hw, list); } @@ -84,7 +139,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status +static int ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_vlan(&vsi->back->hw, list); @@ -95,8 +150,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_eth_mac(&vsi->back->hw, list); } 
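The four new promiscuous wrappers above spare callers from reaching into ice_switch internals. A hypothetical caller (the mask bits are the ICE_PROMISC_* flags from ice_switch.h; vsi->idx serves as the VSI handle, exactly as the wrappers themselves use it):

	int err;

	/* receive all unicast and multicast traffic on this VSI */
	err = ice_fltr_set_vsi_promisc(&pf->hw, vsi->idx,
				       ICE_PROMISC_UCAST_RX |
				       ICE_PROMISC_MCAST_RX, 0);
	if (err)
		return err;

	/* and mirror the setting across the VSI's VLANs */
	err = ice_fltr_set_vlan_vsi_promisc(&pf->hw, vsi,
					    ICE_PROMISC_UCAST_RX |
					    ICE_PROMISC_MCAST_RX);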
@@ -106,8 +160,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_eth_mac(&vsi->back->hw, list); } @@ -207,18 +260,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status (*mac_action)(struct ice_vsi *, - struct list_head *)) + int (*mac_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) { ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -233,21 +285,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status(*mac_action) + int(*mac_action) (struct ice_vsi *, struct list_head *)) { u8 broadcast[ETH_ALEN]; - enum ice_status result; LIST_HEAD(tmp_list); + int result; eth_broadcast_addr(broadcast); if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) || ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) { ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -262,17 +314,16 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @action: action to be performed on filter match * @vlan_action: pointer to add or remove VLAN function */ -static enum ice_status +static int ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id, enum ice_sw_fwd_act_type action, - enum ice_status (*vlan_action)(struct ice_vsi *, - struct list_head *)) + int (*vlan_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = vlan_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -287,17 +338,16 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id, * @action: action to be performed on filter match * @eth_action: pointer to add or remove ethertype function */ -static enum ice_status +static int ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action, - enum ice_status (*eth_action)(struct ice_vsi *, - struct list_head *)) + int (*eth_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = eth_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -310,8 +360,8 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status 
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list); } @@ -322,7 +372,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action) { @@ -336,8 +386,8 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @mac: filter MAC to remove * @action: action to remove */ -enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list); } @@ -348,8 +398,8 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, * @vlan_id: VLAN ID to add * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_vlan(vsi, vlan_id, action, ice_fltr_add_vlan_list); @@ -361,8 +411,8 @@ enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, * @vlan_id: filter VLAN to remove * @action: action to remove */ -enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, - enum ice_sw_fwd_act_type action) +int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_vlan(vsi, vlan_id, action, ice_fltr_remove_vlan_list); @@ -375,8 +425,8 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, * @flag: direction of packet to be filtered, Tx or Rx * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_add_eth_list); @@ -389,8 +439,8 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @flag: direction of filter * @action: action to remove */ -enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, - u16 flag, enum ice_sw_fwd_act_type action) +int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_remove_eth_list); @@ -406,17 +456,17 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, * @src: source VSI * @new_flags: combinations of lb_en and lan_en */ -static enum ice_status +static int ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id, u32 act, u16 type, u16 src, u32 new_flags) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status err; u32 flags_mask; + int err; s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; act &= ~flags_mask; @@ -465,7 +515,7 @@ static u32 ice_fltr_build_action(u16 vsi_id) * Flags should be a combination of 
ICE_SINGLE_ACT_LB_ENABLE and * ICE_SINGLE_ACT_LAN_ENABLE. */ -enum ice_status +int ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, u32 new_flags) { diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h index 8eec4febead1..0e65fd021ad2 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.h +++ b/drivers/net/ethernet/intel/ice/ice_fltr.h @@ -5,38 +5,52 @@ #define _ICE_FLTR_H_ void ice_fltr_free_list(struct device *dev, struct list_head *h); -enum ice_status +int +ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int +ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int +ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +int +ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +int ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status +int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); void ice_fltr_remove_all(struct ice_vsi *vsi); -enum ice_status + +int +ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, + u32 new_flags); +int ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, u32 new_flags); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index f8601d5b0b19..ccdfd2f030d8 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -40,8 +40,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 *package_data; + int status; dev_dbg(dev, "Sending PLDM record package data to firmware\n"); @@ -54,9 +54,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) kfree(package_data); if (status) { - dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware"); return -EIO; } @@ -203,8 +202,8 @@ ice_send_component_table(struct 
pldmfw *context, struct pldmfw_component *compon struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; size_t length; + int status; switch (component->identifier) { case NVM_COMP_ID_OROM: @@ -240,9 +239,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon kfree(comp_tbl); if (status) { - dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware"); return -EIO; } @@ -277,7 +275,6 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u32 completion_offset; int err; @@ -286,11 +283,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n", block_size, module, offset); - status = ice_aq_update_nvm(hw, module, offset, block_size, block, - last_cmd, 0, NULL); - if (status) { - dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n", - module, block_size, offset, ice_stat_str(status), + err = ice_aq_update_nvm(hw, module, offset, block_size, block, + last_cmd, 0, NULL); + if (err) { + dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n", + module, block_size, offset, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module"); return -EIO; @@ -445,7 +442,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; struct devlink *devlink; - enum ice_status status; int err; dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module); @@ -456,10 +452,10 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT); - status = ice_aq_erase_nvm(hw, module, NULL); - if (status) { - dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n", - component, module, ice_stat_str(status), + err = ice_aq_erase_nvm(hw, module, NULL); + if (err) { + dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n", + component, module, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module"); err = -EIO; @@ -524,17 +520,15 @@ static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u16 completion_retval; int err; memset(&event, 0, sizeof(event)); - status = ice_nvm_write_activate(hw, activate_flags); - if (status) { - dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_nvm_write_activate(hw, activate_flags); + if (err) { + dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks"); return -EIO; } @@ -670,7 +664,6 @@ int 
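/*
 * A hedged sketch of the logging shape these ice_fw_update.c hunks adopt:
 * the raw negative errno is printed with %d, while the admin-queue status
 * keeps its own decoder. aq_str() below is a stand-in for ice_aq_str(),
 * not the driver's function, and its mapping is illustrative only.
 */
#include <errno.h>
#include <stdio.h>

static const char *aq_str(int aq_rc)
{
        return aq_rc ? "ICE_AQ_RC_EIO" : "OK";  /* toy decoder */
}

static void report_failure(const char *what, int err, int aq_rc)
{
        if (err)
                fprintf(stderr, "%s failed, err %d aq_err %s\n",
                        what, err, aq_str(aq_rc));
}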
ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_fwu_priv priv; - enum ice_status status; int err; switch (preservation) { @@ -692,13 +685,12 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, priv.pf = pf; priv.activate_flags = preservation; - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; + return err; } err = pldmfw_flash_image(&priv.context, fw); @@ -736,7 +728,6 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, struct device *dev = ice_pf_to_dev(pf); struct ice_hw_dev_caps *dev_caps; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 pending = 0; int err; @@ -749,11 +740,11 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, * may have changed, e.g. if an update was previously completed and * the system has not yet rebooted. */ - status = ice_discover_dev_caps(hw, dev_caps); - if (status) { + err = ice_discover_dev_caps(hw, dev_caps); + if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities"); kfree(dev_caps); - return -EIO; + return err; } if (dev_caps->common_cap.nvm_update_pending_nvm) { @@ -798,13 +789,12 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, "Canceling previous pending update", component, 0, 0); - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; + return err; } pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 09a3297cd63c..1999b12708de 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -291,7 +291,7 @@ void ice_vsi_delete(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctxt; - enum ice_status status; + int status; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -305,8 +305,8 @@ void ice_vsi_delete(struct ice_vsi *vsi) status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) - dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", + vsi->vsi_num, status); kfree(ctxt); } @@ -709,15 +709,15 @@ bool ice_is_aux_ena(struct ice_pf *pf) static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; + int status; if (ice_is_safe_mode(pf)) return; status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); if (status) - dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), 
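/*
 * Several hunks above stop flattening failures to -EIO and instead return
 * the errno they received, so callers can tell -EBUSY from a genuine I/O
 * error. A minimal sketch under that assumption; acquire_lock() is a
 * hypothetical helper standing in for ice_acquire_nvm().
 */
#include <errno.h>

static int acquire_lock(void)
{
        return -EBUSY;                  /* stub: resource currently held */
}

static int flash_image(void)
{
        int err = acquire_lock();

        if (err)
                return err;             /* was "return -EIO;", which lost -EBUSY */
        /* ... perform the update ... */
        return 0;
}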
"ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } /** @@ -1545,8 +1545,8 @@ ice_vsi_cfg_rss_exit: static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1557,8 +1557,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); if (status) - dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } /** @@ -1577,8 +1577,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1590,57 +1590,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for IPv6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for sctp4 with input set IP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); if 
(status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for sctp6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", + vsi_num, status); } /** @@ -1749,22 +1749,21 @@ ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action) int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; - int err = 0; + int err; dev = ice_pf_to_dev(pf); - status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); - if (!status) { + err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); + if (!err) { vsi->num_vlan--; - } else if (status == ICE_ERR_DOES_NOT_EXIST) { - dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n", - vid, vsi->vsi_num, ice_stat_str(status)); + } else if (err == -ENOENT) { + dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n", + vid, vsi->vsi_num, err); + err = 0; } else { - dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n", - vid, vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n", + vid, vsi->vsi_num, err); } return err; @@ -2105,8 +2104,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -2124,12 +2122,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -2148,8 +2144,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; /* do not allow modifying VLAN stripping when a port VLAN is configured * on this VSI @@ -2177,12 +2172,10 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n", - ena, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n", + ena, ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -2324,10 +2317,9 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); 
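/*
 * A sketch of the -ENOENT handling introduced in ice_vsi_kill_vlan() above:
 * removing a VLAN that was never programmed is logged at debug level and
 * reported as success. remove_vlan() is a placeholder, not the driver's
 * ice_fltr_remove_vlan().
 */
#include <errno.h>
#include <stdio.h>

static int remove_vlan(unsigned int vid)
{
        return -ENOENT;                 /* stub: filter does not exist */
}

static int kill_vlan(unsigned int vid)
{
        int err = remove_vlan(vid);

        if (err == -ENOENT) {
                fprintf(stderr, "VLAN %u absent, nothing to remove\n", vid);
                err = 0;                /* not a failure for the caller */
        }
        return err;
}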
if (status) { - netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n", + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n", ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, - ice_stat_str(status), - ice_aq_str(pf->hw.adminq.sq_last_status)); + status, ice_aq_str(pf->hw.adminq.sq_last_status)); goto err_out; } @@ -2405,11 +2397,11 @@ clear_reg_idx: */ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { - enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, - enum ice_sw_fwd_act_type act); + int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, + enum ice_sw_fwd_act_type act); struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; @@ -2428,9 +2420,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) } if (status) - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", + dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", create ? "adding" : "removing", tx ? "TX" : "RX", - vsi->vsi_num, ice_stat_str(status)); + vsi->vsi_num, status); } /** @@ -2450,7 +2442,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) struct ice_port_info *port_info; struct ice_pf *pf = vsi->back; u32 agg_node_id_start = 0; - enum ice_status status; + int status; /* create (as needed) scheduler aggregator node and move VSI into * corresponding aggregator node @@ -2576,7 +2568,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; struct ice_vsi *vsi; int ret, i; @@ -2725,11 +2716,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, } dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (status) { - dev_err(dev, "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_err(dev, "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); goto unroll_clear_rings; } @@ -3036,8 +3027,8 @@ void ice_napi_del(struct ice_vsi *vsi) */ int ice_vsi_release(struct ice_vsi *vsi) { - enum ice_status err; struct ice_pf *pf; + int err; if (!vsi->back) return -ENODEV; @@ -3273,7 +3264,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) int prev_num_q_vectors = 0; struct ice_vf *vf = NULL; enum ice_vsi_type vtype; - enum ice_status status; struct ice_pf *pf; int ret, i; @@ -3421,14 +3411,14 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) /* If MQPRIO is set, means channel code path, hence for main * VSI's, use TC as 1 */ - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); - if (status) { - dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + if (ret) { + dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); if (init_vsi) { ret = -EIO; goto err_vectors; @@ -3663,7 +3653,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, 
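/*
 * With both filter helpers returning int, the add/remove choice in
 * ice_cfg_sw_lldp() stays a plain function pointer, as the hunk above
 * shows. A standalone sketch of that shape; add_rule() and del_rule()
 * are illustrative stubs.
 */
static int add_rule(unsigned short ethertype) { return 0; }
static int del_rule(unsigned short ethertype) { return 0; }

static int cfg_rule(int create, unsigned short ethertype)
{
        int (*action)(unsigned short) = create ? add_rule : del_rule;

        return action(ethertype);
}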
u8 ena_tc) u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctx; - enum ice_status status; struct device *dev; int i, ret = 0; u8 num_tc = 0; @@ -3705,25 +3694,22 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) /* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); - status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); - if (status) { + ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); + if (ret) { dev_info(dev, "Failed VSI Update\n"); - ret = -EIO; goto out; } if (vsi->type == ICE_VSI_PF && test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, - max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); - if (status) { - dev_err(dev, "VSI %d failed TC config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); - ret = -EIO; + if (ret) { + dev_err(dev, "VSI %d failed TC config, error %d\n", + vsi->vsi_num, ret); goto out; } ice_vsi_update_q_map(vsi, ctx); @@ -3776,39 +3762,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) } /** - * ice_status_to_errno - convert from enum ice_status to Linux errno - * @err: ice_status value to convert - */ -int ice_status_to_errno(enum ice_status err) -{ - switch (err) { - case ICE_SUCCESS: - return 0; - case ICE_ERR_DOES_NOT_EXIST: - return -ENOENT; - case ICE_ERR_OUT_OF_RANGE: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_EMPTY: - case ICE_ERR_AQ_FW_CRITICAL: - return -EIO; - case ICE_ERR_PARAM: - case ICE_ERR_INVAL_SIZE: - return -EINVAL; - case ICE_ERR_NO_MEMORY: - return -ENOMEM; - case ICE_ERR_MAX_LIMIT: - return -EAGAIN; - case ICE_ERR_RESET_ONGOING: - return -EBUSY; - case ICE_ERR_AQ_FULL: - return -ENOSPC; - default: - return -EINVAL; - } -} - -/** * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used * @sw: switch to check if its default forwarding VSI is free * @@ -3849,8 +3802,8 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) */ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) { - enum ice_status status; struct device *dev; + int status; if (!sw || !vsi) return -EINVAL; @@ -3873,9 +3826,9 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n", - vsi->vsi_num, ice_stat_str(status)); - return -EIO; + dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", + vsi->vsi_num, status); + return status; } sw->dflt_vsi = vsi; @@ -3895,8 +3848,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) int ice_clear_dflt_vsi(struct ice_sw *sw) { struct ice_vsi *dflt_vsi; - enum ice_status status; struct device *dev; + int status; if (!sw) return -EINVAL; @@ -3912,8 +3865,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw) status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n", - dflt_vsi->vsi_num, ice_stat_str(status)); + dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", + dflt_vsi->vsi_num, status); return -EIO; } @@ -3987,8 +3940,8 @@ int 
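/*
 * The hunk below deletes ice_status_to_errno() because, once every helper
 * returns a negative errno directly, the translation table is dead weight.
 * For reference, the removed mapping was essentially the following; the
 * enum constants are stubbed so this sketch compiles standalone.
 */
#include <errno.h>

enum status { SUCCESS, DOES_NOT_EXIST, PARAM, NO_MEMORY, MAX_LIMIT,
              RESET_ONGOING, AQ_FULL, AQ_ERROR };

static int status_to_errno(enum status err)
{
        switch (err) {
        case SUCCESS:           return 0;
        case DOES_NOT_EXIST:    return -ENOENT;
        case PARAM:             return -EINVAL;
        case NO_MEMORY:         return -ENOMEM;
        case MAX_LIMIT:         return -EAGAIN;
        case RESET_ONGOING:     return -EBUSY;
        case AQ_FULL:           return -ENOSPC;
        case AQ_ERROR:          return -EIO;
        default:                return -EINVAL;
        }
}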
ice_get_link_speed_kbps(struct ice_vsi *vsi) int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4014,7 +3967,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", @@ -4026,7 +3979,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) if (status) { dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", @@ -4048,8 +4001,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4075,7 +4028,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", @@ -4087,7 +4040,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) if (status) { dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", @@ -4107,7 +4060,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) struct device *dev = ice_pf_to_dev(vsi->back); struct ice_port_info *pi = vsi->port_info; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; if (vsi->type != ICE_VSI_PF) return -EINVAL; @@ -4119,16 +4072,16 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) * a success code. Return an error if FW returns an error code other * than ICE_AQ_RC_EMODE */ - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) - dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", + (ena ? "ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); } else if (status) { - dev_err(dev, "can't set link to %s, err %s aq_err %s\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_err(dev, "can't set link to %s, err %d aq_err %s\n", + (ena ? 
"ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; + return status; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 6c803407b67d..b2ed189527d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -103,15 +103,11 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); - -int ice_status_to_errno(enum ice_status err); - void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); -enum ice_status -ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); +int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_aux_ena(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 515ac8c4e891..0c425d24889f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -155,7 +155,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) */ static int ice_init_mac_fltr(struct ice_pf *pf) { - enum ice_status status; struct ice_vsi *vsi; u8 *perm_addr; @@ -164,11 +163,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) return -EINVAL; perm_addr = vsi->port_info->mac.perm_addr; - status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); - if (status) - return -EIO; - - return 0; + return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); } /** @@ -237,36 +232,43 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) } /** - * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF + * ice_set_promisc - Enable promiscuous mode for a given PF * @vsi: the VSI being configured * @promisc_m: mask of promiscuous config bits - * @set_promisc: enable or disable promisc flag request * */ -static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) +static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) { - struct ice_hw *hw = &vsi->back->hw; - enum ice_status status = 0; + int status; if (vsi->type != ICE_VSI_PF) return 0; - if (vsi->num_vlan > 1) { - status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, - set_promisc); - } else { - if (set_promisc) - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - else - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - } + if (vsi->num_vlan > 1) + status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); + else + status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + return status; +} - if (status) - return -EIO; +/** + * ice_clear_promisc - Disable promiscuous mode for a given PF + * @vsi: the VSI being configured + * @promisc_m: mask of promiscuous config bits + * + */ +static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) +{ + int status; - return 0; + if (vsi->type != ICE_VSI_PF) + return 0; + + if (vsi->num_vlan > 1) + status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); + else + status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + return status; } /** @@ -282,10 +284,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) bool promisc_forced_on = false; struct ice_pf 
*pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status = 0; u32 changed_flags = 0; u8 promisc_m; - int err = 0; + int err; if (!vsi->netdev) return -EINVAL; @@ -315,25 +316,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Remove MAC addresses in the unsync list */ - status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); + err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); ice_fltr_free_list(dev, &vsi->tmp_unsync_list); - if (status) { + if (err) { netdev_err(netdev, "Failed to delete MAC filters\n"); /* if we failed because of alloc failures, just bail */ - if (status == ICE_ERR_NO_MEMORY) { - err = -ENOMEM; + if (err == -ENOMEM) goto out; - } } /* Add MAC addresses in the sync list */ - status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); + err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); ice_fltr_free_list(dev, &vsi->tmp_sync_list); /* If filter is added successfully or already exists, do not go into * 'if' condition and report it as error. Instead continue processing * rest of the function. */ - if (status && status != ICE_ERR_ALREADY_EXISTS) { + if (err && err != -EEXIST) { netdev_err(netdev, "Failed to add MAC filters\n"); /* If there is no more space for new umac filters, VSI * should go into promiscuous mode. There should be some @@ -346,10 +345,10 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", vsi->vsi_num); } else { - err = -EIO; goto out; } } + err = 0; /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { @@ -358,7 +357,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) else promisc_m = ICE_MCAST_PROMISC_BITS; - err = ice_cfg_promisc(vsi, promisc_m, true); + err = ice_set_promisc(vsi, promisc_m); if (err) { netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", vsi->vsi_num); @@ -372,7 +371,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) else promisc_m = ICE_MCAST_PROMISC_BITS; - err = ice_cfg_promisc(vsi, promisc_m, false); + err = ice_clear_promisc(vsi, promisc_m); if (err) { netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", vsi->vsi_num); @@ -396,6 +395,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) ~IFF_PROMISC; goto out_promisc; } + err = 0; ice_cfg_vlan_pruning(vsi, false); } } else { @@ -695,12 +695,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) { struct ice_aqc_get_phy_caps_data *caps; const char *an_advertised; - enum ice_status status; const char *fec_req; const char *speed; const char *fec; const char *fc; const char *an; + int status; if (!vsi) return; @@ -1020,10 +1020,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, { struct device *dev = ice_pf_to_dev(pf); struct ice_phy_info *phy_info; - enum ice_status status; struct ice_vsi *vsi; u16 old_link_speed; bool old_link; + int status; phy_info = &pi->phy; phy_info->link_info_old = phy_info->link_info; @@ -1036,8 +1036,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, */ status = ice_update_link_info(pi); if (status) - dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n", - pi->lport, ice_stat_str(status), + dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n", + pi->lport, status, ice_aq_str(pi->hw->adminq.sq_last_status)); ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); @@ -1407,15 +1407,15 @@ static int 
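/*
 * ice_cfg_promisc(vsi, mask, set) is split above into ice_set_promisc()
 * and ice_clear_promisc(), which reads better at the call sites than a
 * boolean flag parameter. A standalone sketch of the same split over a
 * toy bitmask; these are not the ice_fltr_*_promisc() helpers.
 */
static unsigned int promisc_state;

static int set_promisc(unsigned int mask)
{
        promisc_state |= mask;          /* enable the requested modes */
        return 0;
}

static int clear_promisc(unsigned int mask)
{
        promisc_state &= ~mask;         /* disable the requested modes */
        return 0;
}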
__ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) return 0; do { - enum ice_status ret; u16 opcode; + int ret; ret = ice_clean_rq_elem(hw, cq, &event, &pending); - if (ret == ICE_ERR_AQ_NO_WORK) + if (ret == -EALREADY) break; if (ret) { - dev_err(dev, "%s Receive Queue event error %s\n", qtype, - ice_stat_str(ret)); + dev_err(dev, "%s Receive Queue event error %d\n", qtype, + ret); break; } @@ -1866,19 +1866,17 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data *pcaps; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, - NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, + pcaps, NULL); - if (status) { + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto out; } @@ -1977,8 +1975,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) struct ice_aqc_get_phy_caps_data *pcaps; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) return -EIO; @@ -1988,14 +1985,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) return -ENOMEM; if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto err_out; } @@ -2049,8 +2045,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) struct ice_aqc_set_phy_cfg_data *cfg; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = vsi->back; - enum ice_status status; - int err = 0; + int err; /* Ensure we have media as we cannot configure a medialess port */ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) @@ -2070,12 +2065,11 @@ static int ice_configure_phy(struct ice_vsi *vsi) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { - dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { + dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", + vsi->vsi_num, err); goto done; } @@ -2089,15 +2083,14 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Use PHY topology as baseline for configuration */ memset(pcaps, 0, sizeof(*pcaps)); if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { - dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { + dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", 
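/*
 * __ice_clean_ctrlq() above now uses -EALREADY where the old code used
 * ICE_ERR_AQ_NO_WORK: "queue drained, stop polling" rather than a hard
 * failure. A minimal sketch of that loop; next_event() stands in for
 * ice_clean_rq_elem().
 */
#include <errno.h>

static int next_event(void)
{
        return -EALREADY;               /* stub: queue is empty */
}

static int drain_queue(void)
{
        for (;;) {
                int ret = next_event();

                if (ret == -EALREADY)
                        return 0;       /* no more work: normal exit */
                if (ret)
                        return ret;     /* a real error: propagate */
                /* ... handle the event ... */
        }
}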
+ vsi->vsi_num, err); goto done; } @@ -2150,12 +2143,10 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Enable link and link update */ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; - status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); - if (status) { - dev_err(dev, "Failed to set phy config, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; - } + err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); + if (err) + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, err); kfree(cfg); done: @@ -2549,9 +2540,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .vsi_map_offset = vsi->alloc_txq, .mapping_mode = ICE_VSI_MAP_CONTIG }; - enum ice_status status; struct device *dev; int i, v_idx; + int status; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -2605,8 +2596,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", - ice_stat_str(status)); + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", + status); goto clear_xdp_rings; } @@ -3520,7 +3511,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi; - int status = 0; + int status; if (ice_is_reset_in_progress(pf->state)) return -EBUSY; @@ -3533,10 +3524,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf) INIT_LIST_HEAD(&vsi->ch_list); status = ice_cfg_netdev(vsi); - if (status) { - status = -ENODEV; + if (status) goto unroll_vsi_setup; - } /* netdev has to be configured before setting frame size */ ice_vsi_cfg_frame_size(vsi); @@ -3560,14 +3549,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf) if (status) { dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", vsi->vsi_num, status); - status = -EINVAL; goto unroll_napi_add; } status = ice_init_mac_fltr(pf); if (status) goto free_cpu_rx_map; - return status; + return 0; free_cpu_rx_map: ice_free_cpu_rx_rmap(vsi); @@ -4023,8 +4011,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); struct ice_vsi_ctx *ctxt; - enum ice_status status; struct ice_hw *hw; + int status; if (!vsi) return; @@ -4054,9 +4042,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } else { vsi->info.sec_flags = ctxt->info.sec_flags; vsi->info.sw_flags2 = ctxt->info.sw_flags2; @@ -4069,109 +4056,80 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) /** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info - * @status: status of package load + * @state: state of package load */ -static void -ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) +static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) { - struct ice_pf *pf = (struct ice_pf *)hw->back; - struct device *dev = ice_pf_to_dev(pf); + struct ice_pf *pf = hw->back; + struct device *dev; - switch (*status) { - case ICE_SUCCESS: - /* The package download AdminQ command returned success 
because - * this download succeeded or ICE_ERR_AQ_NO_WORK since there is - * already a package loaded on the device. - */ - if (hw->pkg_ver.major == hw->active_pkg_ver.major && - hw->pkg_ver.minor == hw->active_pkg_ver.minor && - hw->pkg_ver.update == hw->active_pkg_ver.update && - hw->pkg_ver.draft == hw->active_pkg_ver.draft && - !memcmp(hw->pkg_name, hw->active_pkg_name, - sizeof(hw->pkg_name))) { - if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) - dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - else - dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || - hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { - dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); - *status = ICE_ERR_NOT_SUPPORTED; - } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { - dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft, - hw->pkg_name, - hw->pkg_ver.major, - hw->pkg_ver.minor, - hw->pkg_ver.update, - hw->pkg_ver.draft); - } else { - dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); - *status = ICE_ERR_NOT_SUPPORTED; - } + dev = ice_pf_to_dev(pf); + + switch (state) { + case ICE_DDP_PKG_SUCCESS: + dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); break; - case ICE_ERR_FW_DDP_MISMATCH: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + break; + case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: + dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. 
The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft, + hw->pkg_name, + hw->pkg_ver.major, + hw->pkg_ver.minor, + hw->pkg_ver.update, + hw->pkg_ver.draft); + break; + case ICE_DDP_PKG_FW_MISMATCH: dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); break; - case ICE_ERR_BUF_TOO_SHORT: - case ICE_ERR_CFG: + case ICE_DDP_PKG_INVALID_FILE: dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); break; - case ICE_ERR_NOT_SUPPORTED: - /* Package File version not supported */ - if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); - else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: + dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); break; - case ICE_ERR_AQ_ERROR: - switch (hw->pkg_dwnld_status) { - case ICE_AQ_RC_ENOSEC: - case ICE_AQ_RC_EBADSIG: - dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_ESVN: - dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_EBADMAN: - case ICE_AQ_RC_EBADBUF: - dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: + dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: + dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: + dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_LOAD_ERROR: + dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); /* poll for reset to complete */ if (ice_check_reset(hw)) dev_err(dev, "Error resetting device. Please reload the driver\n"); - return; - default: - break; - } - fallthrough; - default: - dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", - *status); break; + case ICE_DDP_PKG_ERR: + default: + dev_err(dev, "An unknown error occurred when loading the DDP package. 
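/*
 * The rewritten ice_log_pkg_init() above dispatches on a single enum
 * ice_ddp_state, one message per state, replacing the nested status and
 * AQ-code decoding. A standalone sketch of the shape with abbreviated,
 * illustrative states; this is not the driver's full table.
 */
#include <stdio.h>

enum ddp_state { DDP_OK, DDP_ALREADY_LOADED, DDP_BAD_FILE, DDP_LOAD_ERROR };

static void log_ddp(enum ddp_state state)
{
        switch (state) {
        case DDP_OK:
                puts("package loaded successfully");
                break;
        case DDP_ALREADY_LOADED:
                puts("same package already present on the device");
                break;
        case DDP_BAD_FILE:
                puts("package file invalid, entering safe mode");
                break;
        case DDP_LOAD_ERROR:
        default:
                puts("device error while loading the package");
        }
}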
Entering Safe Mode.\n"); } } @@ -4186,24 +4144,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) static void ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) { - enum ice_status status = ICE_ERR_PARAM; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; /* Load DDP Package */ if (firmware && !hw->pkg_copy) { - status = ice_copy_and_init_pkg(hw, firmware->data, - firmware->size); - ice_log_pkg_init(hw, &status); + state = ice_copy_and_init_pkg(hw, firmware->data, + firmware->size); + ice_log_pkg_init(hw, state); } else if (!firmware && hw->pkg_copy) { /* Reload package during rebuild after CORER/GLOBR reset */ - status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); - ice_log_pkg_init(hw, &status); + state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + ice_log_pkg_init(hw, state); } else { dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); } - if (status) { + if (!ice_is_init_pkg_successful(state)) { /* Safe Mode */ clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); return; @@ -4234,9 +4192,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) * ice_send_version - update firmware with driver version * @pf: PF struct * - * Returns ICE_SUCCESS on success, else error code + * Returns 0 on success, else error code */ -static enum ice_status ice_send_version(struct ice_pf *pf) +static int ice_send_version(struct ice_pf *pf) { struct ice_driver_ver dv; @@ -4526,7 +4484,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * true */ if (ice_is_safe_mode(pf)) { - dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); /* we already got function/device capabilities but these don't * reflect what the driver needs to do in safe mode. Instead of * adding conditional logic everywhere to ignore these @@ -4809,9 +4766,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 mac_addr[ETH_ALEN]; struct ice_vsi *vsi; + int status; u8 flags; if (!pf->wol_ena) @@ -4833,9 +4790,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); if (status) - dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } /** @@ -5374,11 +5330,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; - enum ice_status status; u8 old_mac[ETH_ALEN]; u8 flags = 0; - int err = 0; u8 *mac; + int err; mac = (u8 *)addr->sa_data; @@ -5410,22 +5365,22 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) netif_addr_unlock_bh(netdev); /* Clean up old MAC filter. Not an error if old filter doesn't exist */ - status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); - if (status && status != ICE_ERR_DOES_NOT_EXIST) { + err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); + if (err && err != -ENOENT) { err = -EADDRNOTAVAIL; goto err_update_filters; } /* Add filter for new MAC. 
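/*
 * ice_load_pkg() above now gates Safe Mode on a success predicate over the
 * state enum rather than a zero/non-zero status. A sketch reusing the
 * illustrative enum ddp_state from the previous note; counting "already
 * loaded" as success is an assumption about what
 * ice_is_init_pkg_successful() accepts.
 */
static int ddp_load_successful(enum ddp_state state)
{
        return state == DDP_OK || state == DDP_ALREADY_LOADED;
}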
If filter exists, return success */ - status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) + err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); + if (err == -EEXIST) /* Although this MAC filter is already present in hardware it's * possible in some cases (e.g. bonding) that dev_addr was * modified outside of the driver and needs to be restored back * to this value. */ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); - else if (status) + else if (err) /* error if the new filter addition failed */ err = -EADDRNOTAVAIL; @@ -5444,10 +5399,10 @@ err_update_filters: /* write new MAC address to the firmware */ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; - status = ice_aq_manage_mac_write(hw, mac, flags, NULL); - if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n", - mac, ice_stat_str(status)); + err = ice_aq_manage_mac_write(hw, mac, flags, NULL); + if (err) { + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", + mac, err); } return 0; } @@ -5489,8 +5444,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - enum ice_status status; u16 q_handle; + int status; u8 tc; /* Validate maxrate requested is within permitted range */ @@ -5510,13 +5465,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) else status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, q_handle, ICE_MAX_BW, maxrate * 1000); - if (status) { - netdev_err(netdev, "Unable to set Tx max rate, error %s\n", - ice_stat_str(status)); - return -EIO; - } + if (status) + netdev_err(netdev, "Unable to set Tx max rate, error %d\n", + status); - return 0; + return status; } /** @@ -5888,6 +5841,9 @@ static int ice_up_complete(struct ice_vsi *vsi) netif_carrier_on(vsi->netdev); } + /* clear this now, and the first stats read will be used as baseline */ + vsi->stat_offsets_loaded = false; + ice_service_task_schedule(pf); return 0; @@ -5934,14 +5890,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st /** * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters * @vsi: the VSI to be updated + * @vsi_stats: the stats struct to be updated * @rings: rings to work on * @count: number of rings */ static void -ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, - u16 count) +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, + struct rtnl_link_stats64 *vsi_stats, + struct ice_tx_ring **rings, u16 count) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; u16 i; for (i = 0; i < count; i++) { @@ -5965,15 +5922,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, */ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; + struct rtnl_link_stats64 *vsi_stats; u64 pkts, bytes; int i; - /* reset netdev stats */ - vsi_stats->tx_packets = 0; - vsi_stats->tx_bytes = 0; - vsi_stats->rx_packets = 0; - vsi_stats->rx_bytes = 0; + vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); + if (!vsi_stats) + return; /* reset non-netdev (extended) stats */ vsi->tx_restart = 0; @@ -5985,7 +5940,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) rcu_read_lock(); /* update Tx rings counters */ - ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, + vsi->num_txq); /* 
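/*
 * In ice_set_mac_address() above, re-adding a MAC that is already
 * programmed (-EEXIST) counts as success, since e.g. bonding can rewrite
 * dev_addr outside the driver. A sketch of that tolerance; add_mac() is a
 * placeholder for ice_fltr_add_mac().
 */
#include <errno.h>

static int add_mac(const unsigned char *mac)
{
        return -EEXIST;                 /* stub: filter already present */
}

static int set_mac(const unsigned char *mac)
{
        int err = add_mac(mac);

        if (err == -EEXIST)
                return 0;               /* already there: nothing to do */
        if (err)
                return -EADDRNOTAVAIL;  /* any other failure is fatal here */
        return 0;
}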
update Rx rings counters */ ice_for_each_rxq(vsi, i) { @@ -6000,10 +5956,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) /* update XDP Tx rings counters */ if (ice_is_xdp_ena_vsi(vsi)) - ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, vsi->num_xdp_txq); rcu_read_unlock(); + + vsi->net_stats.tx_packets = vsi_stats->tx_packets; + vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; + vsi->net_stats.rx_packets = vsi_stats->rx_packets; + vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; + + kfree(vsi_stats); } /** @@ -6540,7 +6503,6 @@ static void ice_vsi_release_all(struct ice_pf *pf) static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; int i, err; ice_for_each_vsi(pf, i) { @@ -6558,12 +6520,11 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) } /* replay filters for the VSI */ - status = ice_replay_vsi(&pf->hw, vsi->idx); - if (status) { - dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", - ice_stat_str(status), vsi->idx, - ice_vsi_type_str(type)); - return -EIO; + err = ice_replay_vsi(&pf->hw, vsi->idx); + if (err) { + dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); + return err; } /* Re-map HW VSI number, using VSI handle that has been @@ -6626,7 +6587,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status ret; int err; if (test_bit(ICE_DOWN, pf->state)) @@ -6634,10 +6594,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); - ret = ice_init_all_ctrlq(hw); - if (ret) { - dev_err(dev, "control queues init failed %s\n", - ice_stat_str(ret)); + err = ice_init_all_ctrlq(hw); + if (err) { + dev_err(dev, "control queues init failed %d\n", err); goto err_init_ctrlq; } @@ -6651,10 +6610,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_load_pkg(NULL, pf); } - ret = ice_clear_pf_cfg(hw); - if (ret) { - dev_err(dev, "clear PF configuration failed %s\n", - ice_stat_str(ret)); + err = ice_clear_pf_cfg(hw); + if (err) { + dev_err(dev, "clear PF configuration failed %d\n", err); goto err_init_ctrlq; } @@ -6666,21 +6624,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_clear_pxe_mode(hw); - ret = ice_init_nvm(hw); - if (ret) { - dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); + err = ice_init_nvm(hw); + if (err) { + dev_err(dev, "ice_init_nvm failed %d\n", err); goto err_init_ctrlq; } - ret = ice_get_caps(hw); - if (ret) { - dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); + err = ice_get_caps(hw); + if (err) { + dev_err(dev, "ice_get_caps failed %d\n", err); goto err_init_ctrlq; } - ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); - if (ret) { - dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); + err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); + if (err) { + dev_err(dev, "set_mac_cfg failed %d\n", err); goto err_init_ctrlq; } @@ -6763,10 +6721,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_update_pf_netdev_link(pf); /* tell the firmware we are up */ - ret = ice_send_version(pf); - if (ret) { - dev_err(dev, "Rebuild failed due to error sending driver 
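/*
 * ice_update_vsi_ring_stats() above now sums into freshly allocated
 * scratch storage and copies the totals out at the end, so readers of the
 * live counters never observe the transient zeroed state that the old
 * "reset, then accumulate in place" code exposed. A standalone sketch of
 * that ordering:
 */
#include <stdlib.h>

struct stats { unsigned long long tx_packets, tx_bytes; };

static struct stats live;               /* what concurrent readers see */

static int update_stats(const struct stats *rings, int n)
{
        struct stats *scratch = calloc(1, sizeof(*scratch));
        int i;

        if (!scratch)
                return -1;              /* the driver bails out on -ENOMEM too */

        for (i = 0; i < n; i++) {       /* accumulate off to the side */
                scratch->tx_packets += rings[i].tx_packets;
                scratch->tx_bytes += rings[i].tx_bytes;
        }
        live = *scratch;                /* publish only finished totals */
        free(scratch);
        return 0;
}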
version: %s\n", - ice_stat_str(ret)); + err = ice_send_version(pf); + if (err) { + dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", + err); goto err_vsi_rebuild; } @@ -6947,78 +6905,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err) } /** - * ice_stat_str - convert status err code to a string - * @stat_err: the status error code to convert - */ -const char *ice_stat_str(enum ice_status stat_err) -{ - switch (stat_err) { - case ICE_SUCCESS: - return "OK"; - case ICE_ERR_PARAM: - return "ICE_ERR_PARAM"; - case ICE_ERR_NOT_IMPL: - return "ICE_ERR_NOT_IMPL"; - case ICE_ERR_NOT_READY: - return "ICE_ERR_NOT_READY"; - case ICE_ERR_NOT_SUPPORTED: - return "ICE_ERR_NOT_SUPPORTED"; - case ICE_ERR_BAD_PTR: - return "ICE_ERR_BAD_PTR"; - case ICE_ERR_INVAL_SIZE: - return "ICE_ERR_INVAL_SIZE"; - case ICE_ERR_DEVICE_NOT_SUPPORTED: - return "ICE_ERR_DEVICE_NOT_SUPPORTED"; - case ICE_ERR_RESET_FAILED: - return "ICE_ERR_RESET_FAILED"; - case ICE_ERR_FW_API_VER: - return "ICE_ERR_FW_API_VER"; - case ICE_ERR_NO_MEMORY: - return "ICE_ERR_NO_MEMORY"; - case ICE_ERR_CFG: - return "ICE_ERR_CFG"; - case ICE_ERR_OUT_OF_RANGE: - return "ICE_ERR_OUT_OF_RANGE"; - case ICE_ERR_ALREADY_EXISTS: - return "ICE_ERR_ALREADY_EXISTS"; - case ICE_ERR_NVM: - return "ICE_ERR_NVM"; - case ICE_ERR_NVM_CHECKSUM: - return "ICE_ERR_NVM_CHECKSUM"; - case ICE_ERR_BUF_TOO_SHORT: - return "ICE_ERR_BUF_TOO_SHORT"; - case ICE_ERR_NVM_BLANK_MODE: - return "ICE_ERR_NVM_BLANK_MODE"; - case ICE_ERR_IN_USE: - return "ICE_ERR_IN_USE"; - case ICE_ERR_MAX_LIMIT: - return "ICE_ERR_MAX_LIMIT"; - case ICE_ERR_RESET_ONGOING: - return "ICE_ERR_RESET_ONGOING"; - case ICE_ERR_HW_TABLE: - return "ICE_ERR_HW_TABLE"; - case ICE_ERR_DOES_NOT_EXIST: - return "ICE_ERR_DOES_NOT_EXIST"; - case ICE_ERR_FW_DDP_MISMATCH: - return "ICE_ERR_FW_DDP_MISMATCH"; - case ICE_ERR_AQ_ERROR: - return "ICE_ERR_AQ_ERROR"; - case ICE_ERR_AQ_TIMEOUT: - return "ICE_ERR_AQ_TIMEOUT"; - case ICE_ERR_AQ_FULL: - return "ICE_ERR_AQ_FULL"; - case ICE_ERR_AQ_NO_WORK: - return "ICE_ERR_AQ_NO_WORK"; - case ICE_ERR_AQ_EMPTY: - return "ICE_ERR_AQ_EMPTY"; - case ICE_ERR_AQ_FW_CRITICAL: - return "ICE_ERR_AQ_FW_CRITICAL"; - } - - return "ICE_ERR_UNKNOWN"; -} - -/** * ice_set_rss_lut - Set RSS LUT * @vsi: Pointer to VSI structure * @lut: Lookup table @@ -7030,7 +6916,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -7041,14 +6927,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.lut = lut; status = ice_aq_set_rss_lut(hw, &params); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7061,20 +6944,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - }
+ if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7089,7 +6969,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -7100,14 +6980,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.lut = lut; status = ice_aq_get_rss_lut(hw, &params); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7120,20 +6997,17 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7174,8 +7048,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) struct ice_aqc_vsi_props *vsi_props; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; vsi_props = &vsi->info; @@ -7193,12 +7066,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", - bmode, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", + bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } /* Update sw flags for book keeping */ @@ -7230,7 +7101,6 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, struct ice_pf *pf = np->vsi->back; struct nlattr *attr, *br_spec; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct ice_sw *pf_sw; int rem, v, err = 0; @@ -7264,14 +7134,14 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, /* Update the unicast switch filter rules for the corresponding * switch of the netdev */ - status = ice_update_sw_rule_bridge_mode(hw); - if (status) { - netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", - mode, ice_stat_str(status), + err = ice_update_sw_rule_bridge_mode(hw); + if (err) { + netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", + mode, err, ice_aq_str(hw->adminq.sq_last_status)); /* revert hw->evb_veb */ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); - return -EIO; + return err; } pf_sw->bridge_mode = mode; @@ -8446,7
+8316,6 @@ int ice_open_internal(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_port_info *pi; - enum ice_status status; int err; if (test_bit(ICE_NEEDS_RESTART, pf->state)) { @@ -8457,11 +8326,10 @@ int ice_open_internal(struct net_device *netdev) netif_carrier_off(netdev); pi = vsi->port_info; - status = ice_update_link_info(pi); - if (status) { - netdev_err(netdev, "Failed to get link info, error %s\n", - ice_stat_str(status)); - return -EIO; + err = ice_update_link_info(pi); + if (err) { + netdev_err(netdev, "Failed to get link info, error %d\n", err); + return err; } ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index fee37a5844cf..2d95fe533d1e 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -16,7 +16,7 @@ * * Read the NVM using the admin queue commands (0x0701) */ -static enum ice_status +static int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, bool read_shadow_ram, struct ice_sq_cd *cd) @@ -27,7 +27,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, cmd = &desc.params.nvm; if (offset > ICE_AQC_NVM_MAX_OFFSET) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); @@ -60,21 +60,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. */ -enum ice_status +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram) { - enum ice_status status; u32 inlen = *length; u32 bytes_read = 0; bool last_cmd; + int status; *length = 0; /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n"); - return ICE_ERR_PARAM; + return -EINVAL; } do { @@ -119,7 +119,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, * * Update the NVM using the admin queue commands (0x0703) */ -enum ice_status +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd) @@ -131,7 +131,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); @@ -158,8 +158,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, * * Erase the NVM sector using the admin queue commands (0x0702) */ -enum ice_status -ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; @@ -184,12 +183,11 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) * * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. 
*/ -static enum ice_status -ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) +static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) { u32 bytes = sizeof(u16); - enum ice_status status; __le16 data_local; + int status; /* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and * Shadow RAM sector restrictions necessary when reading from the NVM. @@ -210,8 +208,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) * * This function will request NVM ownership. */ -enum ice_status -ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { if (hw->flash.blank_nvm_mode) return 0; @@ -318,18 +315,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban * hw->flash.banks data being setup by ice_determine_active_flash_banks() * during initialization. */ -static enum ice_status +static int ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, u32 offset, u8 *data, u32 length) { - enum ice_status status; + int status; u32 start; start = ice_get_flash_bank_offset(hw, bank, module); if (!start) { ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", module); - return ICE_ERR_PARAM; + return -EINVAL; } status = ice_acquire_nvm(hw, ICE_RES_READ); @@ -353,11 +350,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, * Read the specified word from the active NVM module. This includes the CSS * header at the start of the NVM module. */ -static enum ice_status +static int ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -377,7 +374,7 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. */ -static enum ice_status +static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); @@ -392,11 +389,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u * * Read a word from the specified netlist bank. */ -static enum ice_status +static int ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -414,9 +411,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. */ -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { @@ -438,13 +435,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. 
*/ -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - enum ice_status status; u16 pfa_len, pfa_ptr; u16 next_tlv; + int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status) { @@ -482,7 +479,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, *module_tlv_len = tlv_len; return 0; } - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Check next TLV, i.e. current TLV pointer + length + 2 words * (for current TLV's type and length) @@ -490,7 +487,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, next_tlv = next_tlv + tlv_len + 2; } /* Module does not exist */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -501,12 +498,11 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, * * Reads the part number string from the NVM. */ -enum ice_status -ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_tlv, pba_tlv_len; - enum ice_status status; u16 pba_word, pba_size; + int status; u16 i; status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, @@ -525,7 +521,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) if (pba_tlv_len < pba_size) { ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Subtract one to get PBA word count (PBA Size word is included in @@ -534,7 +530,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); - return ICE_ERR_PARAM; + return -EINVAL; } for (i = 0; i < pba_size; i++) { @@ -561,11 +557,11 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) * Read the NVM EETRACK ID and map version of the main NVM image bank, filling * in the NVM info structure. */ -static enum ice_status +static int ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm) { u16 eetrack_lo, eetrack_hi, ver; - enum ice_status status; + int status; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { @@ -601,7 +597,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv * inactive NVM bank. Used to access version data for a pending update that * has not yet been activated. */ -enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) { return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm); } @@ -615,12 +611,12 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info * Searches through the Option ROM flash contents to locate the CIVD data for * the image. */ -static enum ice_status +static int ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_civd_info *civd) { struct ice_orom_civd_info tmp; - enum ice_status status; + int status; u32 offset; /* The CIVD section is located in the Option ROM aligned to 512 bytes. 
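The NVM hunks above and below apply a fixed substitution table from the removed driver-private codes to standard errnos: -EINVAL for bad parameters, -ENOENT for a missing module, -ENOMEM for allocation failures, -EBUSY for in-use resources, -ENOSPC for capacity limits, and -EIO for NVM/AQ hardware failures. A minimal standalone sketch of that table, with an illustrative legacy enum standing in for the removed type (none of these names come from the patch itself):

#include <errno.h>

/* Illustrative stand-in for the removed driver-private status type. */
enum legacy_status {
	LEGACY_SUCCESS,
	LEGACY_ERR_PARAM,		/* bad argument */
	LEGACY_ERR_NO_MEMORY,		/* allocation failure */
	LEGACY_ERR_DOES_NOT_EXIST,	/* lookup miss */
	LEGACY_ERR_IN_USE,		/* resource busy */
	LEGACY_ERR_MAX_LIMIT,		/* out of capacity */
	LEGACY_ERR_NVM,			/* NVM/flash failure */
};

/* Map a legacy code to the errno this patch substitutes for it. */
static int legacy_to_errno(enum legacy_status err)
{
	switch (err) {
	case LEGACY_SUCCESS:		return 0;
	case LEGACY_ERR_PARAM:		return -EINVAL;
	case LEGACY_ERR_NO_MEMORY:	return -ENOMEM;
	case LEGACY_ERR_DOES_NOT_EXIST:	return -ENOENT;
	case LEGACY_ERR_IN_USE:		return -EBUSY;
	case LEGACY_ERR_MAX_LIMIT:	return -ENOSPC;
	default:			return -EIO;	/* NVM, AQ, config */
	}
}

The practical gain is visible in ice_get_pfa_module_tlv() above: a caller can now tell "module absent" (-ENOENT) apart from "module malformed" (-EINVAL) without pulling in driver-private headers.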
@@ -650,14 +646,14 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, if (sum) { ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", sum); - return ICE_ERR_NVM; + return -EIO; } *civd = tmp; return 0; } - return ICE_ERR_NVM; + return -EIO; } /** @@ -669,12 +665,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, * Read Option ROM version and security revision from the Option ROM flash * section. */ -static enum ice_status +static int ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom) { struct ice_orom_civd_info civd; - enum ice_status status; u32 combo_ver; + int status; status = ice_get_orom_civd_data(hw, bank, &civd); if (status) { @@ -700,7 +696,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o * section of flash. Used to access version data for a pending update that has * not yet been activated. */ -enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) { return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom); } @@ -715,13 +711,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf * Topology section to find the Netlist ID block and extract the relevant * information into the netlist version structure. */ -static enum ice_status +static int ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_netlist_info *netlist) { u16 module_id, length, node_count, i; - enum ice_status status; u16 *id_blk; + int status; status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id); if (status) @@ -730,7 +726,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); @@ -741,7 +737,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (length < ICE_NETLIST_ID_BLK_SIZE) { ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ICE_NETLIST_ID_BLK_SIZE, length); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); @@ -751,7 +747,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL); if (!id_blk) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Read out the entire Netlist ID Block at once. */ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, @@ -791,7 +787,7 @@ exit_error: * extract version data of a pending flash update in order to display the * version data. */ -enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) { return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist); } @@ -804,10 +800,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli * the actual size is smaller. Use bisection to determine the accessible size * of flash memory. 
*/ -static enum ice_status ice_discover_flash_size(struct ice_hw *hw) +static int ice_discover_flash_size(struct ice_hw *hw) { u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -819,7 +815,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) u8 data; status = ice_read_flat_nvm(hw, offset, &len, &data, false); - if (status == ICE_ERR_AQ_ERROR && + if (status == -EIO && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); @@ -859,10 +855,9 @@ err_read_flat_nvm: * sector size by using the highest bit. The reported pointer value will be in * bytes, intended for flat NVM reads. */ -static enum ice_status -ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) +static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -891,10 +886,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) * Each area size word is specified in 4KB sector units. This function reports * the size in bytes, intended for flat NVM reads. */ -static enum ice_status -ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) +static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -917,12 +911,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) * structure for later use in order to calculate the correct offset to read * from the active module. */ -static enum ice_status -ice_determine_active_flash_banks(struct ice_hw *hw) +static int ice_determine_active_flash_banks(struct ice_hw *hw) { struct ice_bank_info *banks = &hw->flash.banks; - enum ice_status status; u16 ctrl_word; + int status; status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word); if (status) { @@ -933,7 +926,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw) /* Check that the control word indicates validity */ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); - return ICE_ERR_CFG; + return -EIO; } if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) @@ -997,12 +990,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw) * This function reads and populates NVM settings such as Shadow RAM size, * max_timeout, and blank_nvm_mode */ -enum ice_status ice_init_nvm(struct ice_hw *hw) +int ice_init_nvm(struct ice_hw *hw) { struct ice_flash_info *flash = &hw->flash; - enum ice_status status; u32 fla, gens_stat; u8 sr_size; + int status; /* The SR size is stored regardless of the NVM programming mode * as the blank mode may be used in the factory line. 
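A subtlety in ice_discover_flash_size() above: with the private codes gone, an out-of-range read is no longer identifiable from the return value alone, so the check becomes -EIO combined with an ICE_AQ_RC_EINVAL admin-queue status. The loop itself is a plain binary search for the first unreadable offset; a self-contained sketch under the assumption that offset 0 is readable (read_ok() is a hypothetical stand-in for the one-byte ice_read_flat_nvm() probe plus that AQ status check):

#include <stdbool.h>
#include <stdint.h>

/* Bisect for the flash size, i.e. the first inaccessible offset.
 * read_ok() reports whether a one-byte read at the offset succeeds. */
static uint32_t discover_flash_size(uint32_t max_offset_plus_one,
				    bool (*read_ok)(uint32_t offset))
{
	uint32_t min_size = 0;			 /* highest offset known readable */
	uint32_t max_size = max_offset_plus_one; /* lowest offset known to fail */

	while (max_size - min_size > 1) {
		uint32_t offset = min_size + (max_size - min_size) / 2;

		if (read_ok(offset))
			min_size = offset;	/* readable: size is larger */
		else
			max_size = offset;	/* failed: size is smaller */
	}

	return max_size;	/* first inaccessible offset == size in bytes */
}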
@@ -1021,7 +1014,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) /* Blank programming mode */ flash->blank_nvm_mode = true; ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); - return ICE_ERR_NVM_BLANK_MODE; + return -EIO; } status = ice_discover_flash_size(hw); @@ -1060,11 +1053,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) * * Verify NVM PFA checksum validity (0x0706) */ -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) +int ice_nvm_validate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -1080,7 +1073,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) if (!status) if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) - status = ICE_ERR_NVM_CHECKSUM; + status = -EIO; return status; } @@ -1093,7 +1086,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) * Update the control word with the required banks' validity bits * and dumps the Shadow RAM to flash (0x0707) */ -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) +int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) { struct ice_aqc_nvm *cmd; struct ice_aq_desc desc; @@ -1113,7 +1106,7 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) * Update empr (0x0709). This command allows SW to * request an EMPR to activate new FW. */ -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) +int ice_aq_nvm_update_empr(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -1136,7 +1129,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) * as part of the NVM update as the first cmd in the flow. */ -enum ice_status +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd) { @@ -1144,7 +1137,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, struct ice_aq_desc desc; if (length != 0 && !data) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pkg_data; @@ -1173,17 +1166,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, * the TransferFlag is set to End or StartAndEnd. 
*/ -enum ice_status +int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd) { struct ice_aqc_nvm_pass_comp_tbl *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || !comp_response || !comp_response_code) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pass_comp_tbl; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index c6f05f43d593..f1d754ffac16 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -12,38 +12,34 @@ struct ice_orom_civd_info { __le16 combo_name[32]; /* Unicode string representing the Combo Image version */ } __packed; -enum ice_status -ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_nvm(struct ice_hw *hw); -enum ice_status +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type); -enum ice_status -ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); -enum ice_status -ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); -enum ice_status +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist); -enum ice_status -ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); -enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); -enum ice_status +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); +int ice_init_nvm(struct ice_hw *hw); +int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw); -enum ice_status +int ice_nvm_validate_checksum(struct ice_hw *hw); +int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); +int ice_aq_nvm_update_empr(struct ice_hw *hw); +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd); -enum ice_status +int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index bf7247c6f58e..dfc7c830acf6 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -1205,10 +1205,6 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) static int ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) { - /* Reserved for future extensions. 
*/ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: ice_set_tx_tstamp(pf, false); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index ce3c7bded4cb..7947223536e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -11,7 +11,7 @@ * This function inserts the root node of the scheduling tree topology * to the SW DB. */ -static enum ice_status +static int ice_sched_add_root_node(struct ice_port_info *pi, struct ice_aqc_txsched_elem_data *info) { @@ -19,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi, struct ice_hw *hw; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL); if (!root) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* coverity[suspicious_sizeof] */ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], sizeof(*root), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&root->info, info, sizeof(*info)); @@ -96,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) * * This function sends a scheduling elements cmd (cmd_opc) */ -static enum ice_status +static int ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, u16 elems_req, void *buf, u16 buf_size, u16 *elems_resp, struct ice_sq_cd *cd) { struct ice_aqc_sched_elem_cmd *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.sched_elem_cmd; ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); @@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, * * Query scheduling elements (0x0404) */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) @@ -145,18 +145,18 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, * * This function inserts a scheduler node to the SW DB. 
*/ -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; struct ice_sched_node *node; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -166,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, if (!parent) { ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", le32_to_cpu(info->parent_teid)); - return ICE_ERR_PARAM; + return -EINVAL; } /* query the current node information from FW before adding it @@ -178,7 +178,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); if (!node) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; if (hw->max_children[layer]) { /* coverity[suspicious_sizeof] */ node->children = devm_kcalloc(ice_hw_to_dev(hw), @@ -186,7 +186,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, sizeof(*node), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } } @@ -209,7 +209,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, * * Delete scheduling elements (0x040F) */ -static enum ice_status +static int ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_delete_elem *buf, u16 buf_size, u16 *grps_del, struct ice_sq_cd *cd) @@ -228,19 +228,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, * * This function remove nodes from HW */ -static enum ice_status +static int ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, u16 num_nodes, u32 *node_teids) { struct ice_aqc_delete_elem *buf; u16 i, num_groups_removed = 0; - enum ice_status status; u16 buf_size; + int status; buf_size = struct_size(buf, teid, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -369,14 +369,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) * * Get default scheduler topology (0x400) */ -static enum ice_status +static int ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, struct ice_aqc_get_topo_elem *buf, u16 buf_size, u8 *num_branches, struct ice_sq_cd *cd) { struct ice_aqc_get_topo *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); @@ -399,7 +399,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, * * Add scheduling elements (0x0401) */ -static enum ice_status +static int ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_add_elem *buf, u16 buf_size, u16 *grps_added, struct ice_sq_cd *cd) @@ -420,7 +420,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, * * Configure scheduling elements (0x0403) */ -static enum ice_status +static int ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_cfgd, struct ice_sq_cd *cd) @@ -441,7 +441,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, * * Move scheduling elements (0x0408) */ -static enum ice_status +static int ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_move_elem *buf, u16 buf_size, u16 *grps_movd, struct ice_sq_cd *cd) @@ -462,7 +462,7 @@ 
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, * * Suspend scheduling elements (0x0409) */ -static enum ice_status +static int ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -482,7 +482,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * resume scheduling elements (0x040A) */ -static enum ice_status +static int ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -500,7 +500,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * Query scheduler resource allocation (0x0412) */ -static enum ice_status +static int ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, struct ice_aqc_query_txsched_res_resp *buf, struct ice_sq_cd *cd) @@ -520,18 +520,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, * * This function suspends or resumes HW nodes */ -static enum ice_status +static int ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { u16 i, buf_size, num_elem_ret = 0; - enum ice_status status; __le32 *buf; + int status; buf_size = sizeof(*buf) * num_nodes; buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_nodes; i++) buf[i] = cpu_to_le32(node_teids[i]); @@ -558,7 +558,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -566,7 +566,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; /* allocate LAN queue contexts */ if (!vsi_ctx->lan_q_ctx[tc]) { vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -574,7 +574,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->lan_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_lan_q_entries[tc] = new_numqs; return 0; } @@ -585,7 +585,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); @@ -602,7 +602,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -610,7 +610,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; /* allocate RDMA queue contexts */ if (!vsi_ctx->rdma_q_ctx[tc]) { vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -618,7 +618,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->rdma_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_rdma_q_entries[tc] = new_numqs; return 0; } @@ -629,7 
+629,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); @@ -651,14 +651,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * * RL profile function to add, query, or remove profile(s) */ -static enum ice_status +static int ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) { struct ice_aqc_rl_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.rl_profile; @@ -682,7 +682,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, * * Add RL profile (0x0410) */ -static enum ice_status +static int ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_added, struct ice_sq_cd *cd) @@ -702,7 +702,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, * * Remove RL profile (0x0415) */ -static enum ice_status +static int ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_removed, struct ice_sq_cd *cd) @@ -721,24 +721,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, * its associated parameters from HW DB,and locally. The caller needs to * hold scheduler lock. */ -static enum ice_status +static int ice_sched_del_rl_profile(struct ice_hw *hw, struct ice_aqc_rl_profile_info *rl_info) { struct ice_aqc_rl_profile_elem *buf; u16 num_profiles_removed; - enum ice_status status; u16 num_profiles = 1; + int status; if (rl_info->prof_id_ref != 0) - return ICE_ERR_IN_USE; + return -EBUSY; /* Safe to remove profile ID */ buf = &rl_info->profile; status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), &num_profiles_removed, NULL); if (status || num_profiles_removed != num_profiles) - return ICE_ERR_CFG; + return -EIO; /* Delete stale entry now */ list_del(&rl_info->list_entry); @@ -763,7 +763,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, &pi->rl_prof_list[ln], list_entry) { struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); @@ -875,7 +875,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw) * * This function add nodes to HW as well as to SW DB for a given layer */ -static enum ice_status +static int ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, u16 num_nodes, u16 *num_nodes_added, u32 *first_node_teid) @@ -883,15 +883,15 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *prev, *new_node; struct ice_aqc_add_elem *buf; u16 i, num_groups_added = 0; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; size_t buf_size; + int status = 0; u32 teid; buf_size = struct_size(buf, generic, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -918,7 +918,7 @@ 
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", hw->adminq.sq_last_status); devm_kfree(ice_hw_to_dev(hw), buf); - return ICE_ERR_CFG; + return -EIO; } *num_nodes_added = num_nodes; @@ -974,7 +974,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, * * Add nodes into specific HW layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -989,7 +989,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, return 0; if (!parent || layer < pi->hw->sw_entry_point_layer) - return ICE_ERR_PARAM; + return -EINVAL; /* max children per node per layer */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -998,8 +998,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, if ((parent->num_children + num_nodes) > max_child_nodes) { /* Fail if the parent is a TC node */ if (parent == tc_node) - return ICE_ERR_CFG; - return ICE_ERR_MAX_LIMIT; + return -EIO; + return -ENOSPC; } return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, @@ -1018,7 +1018,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, * * This function add nodes to a given layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1027,7 +1027,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, { u32 *first_teid_ptr = first_node_teid; u16 new_num_nodes = num_nodes; - enum ice_status status = 0; + int status = 0; *num_nodes_added = 0; while (*num_nodes_added < num_nodes) { @@ -1045,14 +1045,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, if (*num_nodes_added > num_nodes) { ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, *num_nodes_added); - status = ICE_ERR_CFG; + status = -EIO; break; } /* break if all the nodes are added successfully */ if (!status && (*num_nodes_added == num_nodes)) break; /* break if the error is not max limit */ - if (status && status != ICE_ERR_MAX_LIMIT) + if (status && status != -ENOSPC) break; /* Exceeded the max children */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -1152,7 +1152,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) } if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - enum ice_status status; + int status; /* remove the default leaf node */ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); @@ -1198,23 +1198,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) * resources, default topology created by firmware and storing the information * in SW DB. 
*/ -enum ice_status ice_sched_init_port(struct ice_port_info *pi) +int ice_sched_init_port(struct ice_port_info *pi) { struct ice_aqc_get_topo_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 num_branches; u16 num_elems; + int status; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; /* Query the Default Topology from FW */ buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Query default scheduling tree topology */ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, @@ -1226,7 +1226,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", num_branches); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1237,7 +1237,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", num_elems); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1300,11 +1300,11 @@ err_init_port: * * query FW for allocated scheduler resources and store in HW struct */ -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +int ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; - enum ice_status status = 0; __le16 max_sibl; + int status = 0; u16 i; if (hw->layer_info) @@ -1312,7 +1312,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); if (status) @@ -1341,7 +1341,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) sizeof(*hw->layer_info)), GFP_KERNEL); if (!hw->layer_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto sched_query_out; } @@ -1614,31 +1614,31 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * This function adds the VSI child nodes to tree. It gets called for * LAN and RDMA separately. 
*/ -static enum ice_status +static int ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) { struct ice_sched_node *parent, *node; struct ice_hw *hw = pi->hw; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; + int status; qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1717,18 +1717,18 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, * This function adds the VSI supported nodes into Tx tree including the * VSI, its parent and intermediate nodes in below layers */ -static enum ice_status +static int ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, vsil; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { @@ -1737,7 +1737,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1749,7 +1749,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (i == vsil) parent->vsi_handle = vsi_handle; @@ -1766,7 +1766,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, * * This function adds a new VSI into scheduler tree */ -static enum ice_status +static int ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; @@ -1774,7 +1774,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; /* calculate number of supported nodes needed for this VSI */ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); @@ -1794,7 +1794,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) * * This function updates the VSI child nodes based on the number of queues */ -static enum ice_status +static int ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { @@ -1802,21 +1802,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; u16 prev_numqs; + int status = 0; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; if (owner == ICE_SCHED_NODE_OWNER_LAN) prev_numqs = 
vsi_ctx->sched.max_lanq[tc]; @@ -1869,22 +1869,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, * enabled and VSI is in suspended state then resume the VSI back. If TC is * disabled then suspend the VSI if it is not already. */ -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); /* suspend the VSI if TC is not enabled */ @@ -1908,7 +1908,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_node->in_use = true; @@ -1993,11 +1993,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) * This function removes the VSI and its LAN or RDMA children nodes from the * scheduler tree. */ -static enum ice_status +static int ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { - enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; + int status = -EINVAL; u8 i; ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); @@ -2022,7 +2022,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (ice_sched_is_leaf_node_present(vsi_node)) { ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); - status = ICE_ERR_IN_USE; + status = -EBUSY; goto exit_sched_rm_vsi_cfg; } while (j < vsi_node->num_children) { @@ -2065,7 +2065,7 @@ exit_sched_rm_vsi_cfg: * This function clears the VSI and its LAN children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); } @@ -2078,7 +2078,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) * This function clears the VSI and its RDMA children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); } @@ -2188,36 +2188,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent, * * This function move the child nodes to a given parent. 
*/ -static enum ice_status +static int ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { struct ice_aqc_move_elem *buf; struct ice_sched_node *node; - enum ice_status status = 0; u16 i, grps_movd = 0; struct ice_hw *hw; + int status = 0; u16 buf_len; hw = pi->hw; if (!parent || !num_items) - return ICE_ERR_PARAM; + return -EINVAL; /* Does parent have enough space */ if (parent->num_children + num_items > hw->max_children[parent->tx_sched_layer]) - return ICE_ERR_AQ_FULL; + return -ENOSPC; buf_len = struct_size(buf, teid, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_items; i++) { node = ice_sched_find_node_by_teid(pi->root, list[i]); if (!node) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto move_err_exit; } @@ -2228,7 +2228,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, &grps_movd, NULL); if (status && grps_movd != 1) { - status = ICE_ERR_CFG; + status = -EIO; goto move_err_exit; } @@ -2251,28 +2251,28 @@ move_err_exit: * This function moves a VSI to an aggregator node or its subtree. * Intermediate nodes may be created if required. */ -static enum ice_status +static int ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, u8 tc) { struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u32 first_node_teid, vsi_teid; - enum ice_status status; u16 num_nodes_added; u8 aggl, vsil, i; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Is this VSI already part of given aggregator? */ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) @@ -2302,7 +2302,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2314,7 +2314,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; } move_nodes: @@ -2333,14 +2333,14 @@ move_nodes: * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The * caller holds the scheduler lock. 
*/ -static enum ice_status +static int ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *tmp; - enum ice_status status = 0; + int status = 0; list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list, list_entry) { @@ -2397,7 +2397,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) * This function removes the aggregator node and intermediate nodes if any * from the given TC */ -static enum ice_status +static int ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *tc_node, *agg_node; @@ -2405,15 +2405,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Can't remove the aggregator node if it has children */ if (ice_sched_is_agg_inuse(pi, agg_node)) - return ICE_ERR_IN_USE; + return -EBUSY; /* need to remove the whole subtree if aggregator node is the * only child. @@ -2422,7 +2422,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) struct ice_sched_node *parent = agg_node->parent; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (parent->num_children > 1) break; @@ -2445,11 +2445,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * the aggregator configuration completely for requested TC. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { - enum ice_status status = 0; + int status = 0; /* If nothing to remove - return success */ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) @@ -2478,7 +2478,7 @@ exit_rm_agg_cfg_tc: * Save aggregator TC bitmap. This function needs to be called with scheduler * lock held. */ -static enum ice_status +static int ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, unsigned long *tc_bitmap) { @@ -2486,7 +2486,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2501,20 +2501,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, * This function creates an aggregator node and intermediate nodes if required * for the given TC */ -static enum ice_status +static int ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *parent, *agg_node, *tc_node; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; u32 first_node_teid; u16 num_nodes_added; + int status = 0; u8 i, aggl; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); /* Does Agg node already exist ? 
*/ @@ -2549,14 +2549,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) parent = tc_node; for (i = hw->sw_entry_point_layer; i <= aggl; i++) { if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2591,13 +2591,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * resources and remove aggregator ID. * This function needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, unsigned long *tc_bitmap) { struct ice_sched_agg_info *agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; agg_info = ice_get_agg_info(hw, agg_id); @@ -2606,7 +2606,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info), GFP_KERNEL); if (!agg_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; agg_info->agg_id = agg_id; agg_info->agg_type = agg_type; @@ -2653,19 +2653,17 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, * * This function configures aggregator node(s). */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); - status = ice_sched_cfg_agg(pi, agg_id, agg_type, - (unsigned long *)&bitmap); + status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap); if (!status) - status = ice_save_agg_tc_bitmap(pi, agg_id, - (unsigned long *)&bitmap); + status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap); mutex_unlock(&pi->sched_lock); return status; } @@ -2724,7 +2722,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) * Save VSI to aggregator TC bitmap. This function needs to call with scheduler * lock held. */ -static enum ice_status +static int ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { @@ -2733,11 +2731,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* check if entry already exist */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2754,21 +2752,21 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, * already associated to the aggregator node then no operation is performed on * the tree. This function needs to be called with scheduler lock held. 
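The ice_cfg_agg() hunk above also drops the (unsigned long *)&bitmap casts. A small sketch of why that is safe: the u8 TC bitmap is widened into a genuine unsigned long up front, so &bitmap already has the type the bitmap helpers expect. count_tcs() is an illustrative stand-in for the per-TC walks in the driver.

	#include <stdio.h>

	static int count_tcs(const unsigned long *tc_bitmap, unsigned int max_tc)
	{
		int n = 0;

		for (unsigned int tc = 0; tc < max_tc; tc++)
			if (*tc_bitmap & (1UL << tc))
				n++;
		return n;
	}

	int main(void)
	{
		unsigned char tc_u8 = 0x05;	/* TC0 and TC2 enabled */
		unsigned long bitmap = tc_u8;	/* widen once, pass &bitmap */

		printf("%d TCs enabled\n", count_tcs(&bitmap, 8));
		return 0;
	}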
*/ -static enum ice_status +static int ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL; struct ice_sched_agg_info *agg_info, *old_agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_agg_info(hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* If the VSI is already part of another aggregator then update * its VSI info list */ @@ -2790,7 +2788,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_vsi_info), GFP_KERNEL); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; /* add VSI ID into the aggregator list */ agg_vsi_info->vsi_handle = vsi_handle; @@ -2851,14 +2849,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) * returns success or error on config sched element failure. The caller * needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data buf; - enum ice_status status; u16 elem_cfgd = 0; u16 num_elems = 1; + int status; buf = *info; /* Parent TEID is reserved field in this aq call */ @@ -2874,7 +2872,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, &elem_cfgd, NULL); if (status || elem_cfgd != num_elems) { ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); - return ICE_ERR_CFG; + return -EIO; } /* Config success case */ @@ -2893,7 +2891,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, * * This function configures node element's BW allocation. */ -static enum ice_status +static int ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 bw_alloc) { @@ -2909,7 +2907,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); } else { - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -2925,12 +2923,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, * * Move or associate VSI to a new or default aggregator node. */ -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, @@ -3098,12 +3096,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) * * This function converts the BW to profile structure format. 
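The converted ice_sched_update_elem() above keeps its contract: the admin queue reports how many elements it actually configured, and any shortfall is collapsed into -EIO (formerly ICE_ERR_CFG). A self-contained sketch, with fw_cfg_elems() standing in for the firmware call:

	#include <errno.h>

	static int fw_cfg_elems(unsigned int requested, unsigned int *configured)
	{
		*configured = requested;	/* pretend firmware did the work */
		return 0;
	}

	static int update_elem(void)
	{
		unsigned int num_elems = 1, elem_cfgd = 0;
		int status = fw_cfg_elems(num_elems, &elem_cfgd);

		if (status || elem_cfgd != num_elems)
			return -EIO;	/* was ICE_ERR_CFG */
		return 0;
	}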
*/ -static enum ice_status +static int ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, struct ice_aqc_rl_profile_elem *profile) { - enum ice_status status = ICE_ERR_PARAM; s64 bytes_per_sec, ts_rate, mv_tmp; + int status = -EINVAL; bool found = false; s32 encode = 0; s64 mv = 0; @@ -3150,7 +3148,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, profile->rl_encode = cpu_to_le16(encode); status = 0; } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } return status; @@ -3176,9 +3174,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, struct ice_aqc_rl_profile_info *rl_prof_elem; u16 profiles_added = 0, num_profiles = 1; struct ice_aqc_rl_profile_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 profile_type; + int status; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) return NULL; @@ -3249,7 +3247,7 @@ exit_add_rl_prof: * * This function configures node element's BW limit. */ -static enum ice_status +static int ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 rl_prof_id) { @@ -3268,7 +3266,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, * hence only one of them may be set for any given element */ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) - return ICE_ERR_CFG; + return -EIO; data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); break; @@ -3291,7 +3289,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && (le16_to_cpu(data->eir_bw.bw_profile_idx) != ICE_SCHED_DFLT_RL_PROF_ID)) - return ICE_ERR_CFG; + return -EIO; /* EIR BW is set to default, disable it */ data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; /* Okay to enable shared BW now */ @@ -3300,7 +3298,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, break; default: /* Unknown rate limit type */ - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -3420,15 +3418,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold * scheduler lock. */ -static enum ice_status +static int ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, u16 profile_id) { struct ice_aqc_rl_profile_info *rl_prof_elem; - enum ice_status status = 0; + int status = 0; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) - return ICE_ERR_PARAM; + return -EINVAL; /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) @@ -3441,11 +3439,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, /* Remove old profile ID from database */ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); - if (status && status != ICE_ERR_IN_USE) + if (status && status != -EBUSY) ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); break; } - if (status == ICE_ERR_IN_USE) + if (status == -EBUSY) status = 0; return status; } @@ -3461,16 +3459,16 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. 
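Worth calling out from the ice_sched_rm_rl_profile() hunk above: -EBUSY (formerly ICE_ERR_IN_USE) is deliberately downgraded to success, because a profile that other nodes still reference simply cannot be deleted yet and that is not a failure for the caller. A distilled sketch, with del_profile() standing in for ice_sched_del_rl_profile():

	#include <errno.h>

	static int del_profile(unsigned int refcount)
	{
		return refcount ? -EBUSY : 0;	/* still referenced? */
	}

	static int rm_rl_profile(unsigned int refcount)
	{
		int status = del_profile(refcount);

		if (status == -EBUSY)
			status = 0;	/* shared profile: not an error */
		return status;
	}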
*/ -static enum ice_status +static int ice_sched_set_node_bw_dflt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u8 layer_num) { - enum ice_status status; struct ice_hw *hw; u8 profile_type; u16 rl_prof_id; u16 old_id; + int status; hw = pi->hw; switch (rl_type) { @@ -3488,7 +3486,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* Save existing RL prof ID for later clean up */ old_id = ice_sched_get_node_rl_prof_id(node, rl_type); @@ -3518,7 +3516,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, * them may be set for any given element. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_eir_srl_excl(struct ice_port_info *pi, struct ice_sched_node *node, u8 layer_num, enum ice_rl_type rl_type, u32 bw) @@ -3562,14 +3560,14 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi, * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile * ID from local database. The caller needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { struct ice_aqc_rl_profile_info *rl_prof_info; - enum ice_status status = ICE_ERR_PARAM; struct ice_hw *hw = pi->hw; u16 old_id, rl_prof_id; + int status = -EINVAL; rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); if (!rl_prof_info) @@ -3608,31 +3606,31 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, * It updates node's BW limit parameters like BW RL profile ID of type CIR, * EIR, or SRL. The caller needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw) { struct ice_sched_node *cfg_node = node; - enum ice_status status; + int status; struct ice_hw *hw; u8 layer_num; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; /* Remove unused RL profile IDs from HW and SW DB */ ice_sched_rm_unused_rl_prof(pi); layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (layer_num >= hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; if (rl_type == ICE_SHARED_BW) { /* SRL node may be different */ cfg_node = ice_sched_get_srl_node(node, layer_num); if (!cfg_node) - return ICE_ERR_CFG; + return -EIO; } /* EIR BW and Shared BW profiles are mutually exclusive and * hence only one of them may be set for any given element @@ -3657,7 +3655,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type) @@ -3675,7 +3673,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, * behalf of the requested node (first argument). This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) { /* SRL profiles are not available on all layers. 
Check if the @@ -3690,7 +3688,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) (node->parent && node->parent->num_children == 1))) return 0; - return ICE_ERR_CFG; + return -EIO; } /** @@ -3701,7 +3699,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) * * Save BW information of queue type node for post replay use. */ -static enum ice_status +static int ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) { switch (rl_type) { @@ -3715,7 +3713,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -3731,16 +3729,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) * * This function sets BW limit of queue scheduling node. */ -static enum ice_status +static int ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; struct ice_q_ctx *q_ctx; + int status = -EINVAL; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); if (!q_ctx) @@ -3762,7 +3760,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (sel_layer >= pi->hw->num_tx_sched_layers) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit_q_bw_lmt; } status = ice_sched_validate_srl_node(node, sel_layer); @@ -3794,7 +3792,7 @@ exit_q_bw_lmt: * * This function configures BW limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { @@ -3812,7 +3810,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * * This function configures BW default limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type) { @@ -3880,13 +3878,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, * This function sets BW limit of VSI or Aggregator scheduling node * based on TC information from passed in argument BW. */ -static enum ice_status +int ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; + int status = -EINVAL; if (!pi) return status; @@ -3921,7 +3919,7 @@ exit_set_node_bw_lmt_per_tc: * This function configures BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -3948,7 +3946,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function configures default BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type) { @@ -3976,13 +3974,13 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * burst size value is used for future rate limit calls. 
It doesn't change the * existing or previously created RL profiles. */ -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) { u16 burst_size_to_prog; if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || bytes > ICE_MAX_BURST_SIZE_ALLOWED) - return ICE_ERR_PARAM; + return -EINVAL; if (ice_round_to_num(bytes, 64) <= ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { /* 64 byte granularity case */ @@ -4017,13 +4015,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) * This function configures node element's priority value. It * needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; buf = node->info; data = &buf.data; @@ -4044,12 +4042,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, * This function restores node's BW from bw_t_info. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, struct ice_bw_type_info *bw_t_info) { struct ice_port_info *pi = hw->port_info; - enum ice_status status = ICE_ERR_PARAM; + int status = -EINVAL; u16 bw_alloc; if (!node) @@ -4137,7 +4135,7 @@ void ice_sched_replay_agg(struct ice_hw *hw) if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); ice_sched_get_ena_tc_bitmap(pi, @@ -4191,18 +4189,17 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) * their node bandwidth information. This function needs to be called with * scheduler lock held. */ -static enum ice_status -ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_vsi_agg_info(hw, vsi_handle); if (!agg_info) return 0; /* Not present in list - default Agg case */ @@ -4233,10 +4230,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays association of VSI to aggregator type nodes, and * node bandwidth information. */ -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { struct ice_port_info *pi = hw->port_info; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_replay_vsi_agg(hw, vsi_handle); @@ -4252,14 +4249,13 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays queue type node bandwidth. This function needs to be * called with scheduler lock held. 
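A sketch of the ice_cfg_rl_burst_size() entry check above plus the 64-byte-granularity rounding it selects for small requests. The bounds and the round-to-nearest semantics of ice_round_to_num() are assumptions here, not taken from these hunks; only the -EINVAL substitution for ICE_ERR_PARAM is.

	#include <errno.h>

	#define MIN_BURST	64U		/* assumed bound */
	#define MAX_BURST	(1024U * 1024U)	/* assumed bound */

	static unsigned int round_to_num(unsigned int val, unsigned int gran)
	{
		if (val % gran < gran / 2)
			return (val / gran) * gran;
		return ((val + gran - 1) / gran) * gran;
	}

	static int cfg_burst_size(unsigned int bytes, unsigned int *prog)
	{
		if (bytes < MIN_BURST || bytes > MAX_BURST)
			return -EINVAL;	/* was ICE_ERR_PARAM */

		*prog = round_to_num(bytes, 64);	/* 64 byte granularity */
		return 0;
	}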
*/ -enum ice_status -ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) { struct ice_sched_node *q_node; /* Following also checks the presence of node in tree */ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); if (!q_node) - return ICE_ERR_PARAM; + return -EINVAL; return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); } diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 6bddcbecaf5e..4f91577fed56 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -65,12 +65,12 @@ struct ice_sched_agg_info { }; /* FW AQ command calls */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd); -enum ice_status ice_sched_init_port(struct ice_port_info *pi); -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); +int ice_sched_init_port(struct ice_port_info *pi); +int ice_sched_query_res_alloc(struct ice_hw *hw); void ice_sched_get_psm_clk_freq(struct ice_hw *hw); void ice_sched_clear_port(struct ice_port_info *pi); @@ -79,7 +79,7 @@ void ice_sched_clear_agg(struct ice_hw *hw); struct ice_sched_node * ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info); void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); @@ -87,35 +87,38 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner); -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); /* Tx scheduler rate limiter functions */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap); -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap); -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type); -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type); -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); +int +ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc, + enum ice_rl_type rl_type, u32 bw); +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); void ice_sched_replay_agg(struct ice_hw *hw); -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status 
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index aa11d07793d4..52c6bac41bf7 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -18,7 +18,7 @@ * queue and asynchronously sending message via * ice_sq_send_cmd() function */ -enum ice_status +int ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd) { @@ -228,7 +228,7 @@ ice_mbx_traverse(struct ice_hw *hw, * sent per VF and marks the VF as malicious if it exceeds * the permissible number of messages to send. */ -static enum ice_status +static int ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, enum ice_mbx_snapshot_state *new_state, bool *is_malvf) @@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; if (vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* increment the message count in the VF array */ snap->mbx_vf.vf_cntr[vf_id]++; @@ -297,7 +297,7 @@ static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap) * Detect: If pending message count exceeds watermark traverse * the static snapshot and look for a malicious VF. */ -enum ice_status +int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, u16 vf_id, bool *is_malvf) @@ -306,10 +306,10 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_snap_buffer_data *snap_buf; struct ice_ctl_q_info *cq = &hw->mailboxq; enum ice_mbx_snapshot_state new_state; - enum ice_status status = 0; + int status = 0; if (!is_malvf || !mbx_data) - return ICE_ERR_BAD_PTR; + return -EINVAL; /* When entering the mailbox state machine assume that the VF * is not malicious until detected. @@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, * interrupt is not less than the defined AVF message threshold. */ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; /* The watermark value should not be lesser than the threshold limit * set for the number of asynchronous messages a VF can send to mailbox @@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, */ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD || mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx) - return ICE_ERR_PARAM; + return -EINVAL; new_state = ICE_MAL_VF_DETECT_STATE_INVALID; snap_buf = &snap->mbx_buf; @@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, default: new_state = ICE_MAL_VF_DETECT_STATE_INVALID; - status = ICE_ERR_CFG; + status = -EIO; } snap_buf->state = new_state; @@ -405,20 +405,20 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, * the input vf_id against the bitmap to verify if the VF has been * detected in any previous mailbox iterations. 
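One side effect visible in the ice_mbx_vf_state_handler() hunk above: three formerly distinct codes (ICE_ERR_BAD_PTR, ICE_ERR_INVAL_SIZE, ICE_ERR_PARAM) all collapse to -EINVAL, so callers can no longer tell from the code alone which argument was bad. The entry checks, distilled (threshold value taken from the ice_sriov.h hunk below):

	#include <errno.h>

	#define ASYNC_VF_MSG_THRESHOLD	63	/* ICE_ASYNC_VF_MSG_THRESHOLD */

	static int validate_mbx_input(const void *mbx_data,
				      unsigned int max_num_msgs_mbx,
				      unsigned int async_watermark_val)
	{
		if (!mbx_data)
			return -EINVAL;		/* was ICE_ERR_BAD_PTR */
		if (max_num_msgs_mbx <= ASYNC_VF_MSG_THRESHOLD)
			return -EINVAL;		/* was ICE_ERR_INVAL_SIZE */
		if (async_watermark_val < ASYNC_VF_MSG_THRESHOLD ||
		    async_watermark_val > max_num_msgs_mbx)
			return -EINVAL;		/* was ICE_ERR_PARAM */
		return 0;
	}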
*/ -enum ice_status +int ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id, bool *report_malvf) { if (!all_malvfs || !report_malvf) - return ICE_ERR_PARAM; + return -EINVAL; *report_malvf = false; if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; if (vf_id >= bitmap_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* If the vf_id is found in the bitmap set bit and boolean to true */ if (!test_and_set_bit(vf_id, all_malvfs)) @@ -441,19 +441,19 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, * that the new VF loaded is not considered malicious before going * through the overflow detection algorithm. */ -enum ice_status +int ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id) { if (!snap || !all_malvfs) - return ICE_ERR_PARAM; + return -EINVAL; if (bitmap_len < snap->mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; /* Ensure VF ID value is not larger than bitmap or VF counter length */ if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */ clear_bit(vf_id, all_malvfs); @@ -482,7 +482,7 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, * called to ensure that the vf_count can be compared against the number * of VFs supported as defined in the functional capabilities of the device. */ -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) { struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; @@ -491,13 +491,13 @@ enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) * the functional capabilities of the PF. */ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count, sizeof(*snap->mbx_vf.vf_cntr), GFP_KERNEL); if (!snap->mbx_vf.vf_cntr) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Setting the VF counter length to the number of allocated * VFs for given PF's functional capabilities. 
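A userspace sketch of ice_mbx_report_malvf() as converted above: a VF is reported only on first sighting, bad pointers fail with -EINVAL, and an out-of-range VF ID now returns -EIO (formerly ICE_ERR_OUT_OF_RANGE). test_and_set() is an open-coded, non-atomic stand-in for the kernel's test_and_set_bit(), included only to keep the sketch self-contained.

	#include <errno.h>
	#include <limits.h>
	#include <stdbool.h>

	#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

	static bool test_and_set(unsigned long *map, unsigned int bit)
	{
		unsigned long mask = 1UL << (bit % BITS_PER_LONG);
		bool was_set = map[bit / BITS_PER_LONG] & mask;

		map[bit / BITS_PER_LONG] |= mask;
		return was_set;
	}

	static int report_malvf(unsigned long *all_malvfs,
				unsigned int bitmap_len,
				unsigned int vf_id, bool *report)
	{
		if (!all_malvfs || !report)
			return -EINVAL;

		*report = false;
		if (vf_id >= bitmap_len)
			return -EIO;

		if (!test_and_set(all_malvfs, vf_id))
			*report = true;	/* first sighting: log it */
		return 0;
	}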
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 161dc55d9e9c..68686a3fd7e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -14,24 +14,24 @@ #define ICE_ASYNC_VF_MSG_THRESHOLD 63 #ifdef CONFIG_PCI_IOV -enum ice_status +int ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd); u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); -enum ice_status +int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, u16 vf_id, bool *is_mal_vf); -enum ice_status +int ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id); -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); void ice_mbx_deinit_snapshot(struct ice_hw *hw); -enum ice_status +int ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id, bool *report_malvf); #else /* CONFIG_PCI_IOV */ -static inline enum ice_status +static inline int ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, u16 __always_unused vfid, u32 __always_unused v_opcode, u32 __always_unused v_retval, u8 __always_unused *msg, diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h deleted file mode 100644 index dbf66057371d..000000000000 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ - -#ifndef _ICE_STATUS_H_ -#define _ICE_STATUS_H_ - -/* Error Codes */ -enum ice_status { - ICE_SUCCESS = 0, - - /* Generic codes : Range -1..-49 */ - ICE_ERR_PARAM = -1, - ICE_ERR_NOT_IMPL = -2, - ICE_ERR_NOT_READY = -3, - ICE_ERR_NOT_SUPPORTED = -4, - ICE_ERR_BAD_PTR = -5, - ICE_ERR_INVAL_SIZE = -6, - ICE_ERR_DEVICE_NOT_SUPPORTED = -8, - ICE_ERR_RESET_FAILED = -9, - ICE_ERR_FW_API_VER = -10, - ICE_ERR_NO_MEMORY = -11, - ICE_ERR_CFG = -12, - ICE_ERR_OUT_OF_RANGE = -13, - ICE_ERR_ALREADY_EXISTS = -14, - ICE_ERR_DOES_NOT_EXIST = -15, - ICE_ERR_IN_USE = -16, - ICE_ERR_MAX_LIMIT = -17, - ICE_ERR_RESET_ONGOING = -18, - ICE_ERR_HW_TABLE = -19, - ICE_ERR_FW_DDP_MISMATCH = -20, - - ICE_ERR_NVM = -50, - ICE_ERR_NVM_CHECKSUM = -51, - ICE_ERR_BUF_TOO_SHORT = -52, - ICE_ERR_NVM_BLANK_MODE = -53, - ICE_ERR_AQ_ERROR = -100, - ICE_ERR_AQ_TIMEOUT = -101, - ICE_ERR_AQ_FULL = -102, - ICE_ERR_AQ_NO_WORK = -103, - ICE_ERR_AQ_EMPTY = -104, - ICE_ERR_AQ_FW_CRITICAL = -105, -}; - -#endif /* _ICE_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 793f4a9fc2cd..e477c2b1d5bb 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -528,7 +528,7 @@ static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES], * Allocate memory for the entire recipe table and initialize the structures/ * entries corresponding to basic recipes. 
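For reference, the substitutions applied across these hunks for the just-deleted enum ice_status, expressed as a standalone lookup. This is a reader's aid only: the patch converts every call site in place and adds no such helper; the legacy values are copied from the deleted ice_status.h above, the table covers only the codes exercised in this section, and the fallback arm is a placeholder, not derived from these hunks.

	#include <errno.h>

	enum ice_status_legacy {
		ICE_ERR_PARAM		= -1,
		ICE_ERR_NOT_IMPL	= -2,
		ICE_ERR_BAD_PTR		= -5,
		ICE_ERR_INVAL_SIZE	= -6,
		ICE_ERR_NO_MEMORY	= -11,
		ICE_ERR_CFG		= -12,
		ICE_ERR_OUT_OF_RANGE	= -13,
		ICE_ERR_ALREADY_EXISTS	= -14,
		ICE_ERR_DOES_NOT_EXIST	= -15,
		ICE_ERR_IN_USE		= -16,
		ICE_ERR_AQ_FULL		= -102,
	};

	static int legacy_to_errno(enum ice_status_legacy status)
	{
		switch (status) {
		case ICE_ERR_PARAM:
		case ICE_ERR_BAD_PTR:
		case ICE_ERR_INVAL_SIZE:	return -EINVAL;
		case ICE_ERR_NOT_IMPL:		return -EOPNOTSUPP;
		case ICE_ERR_NO_MEMORY:		return -ENOMEM;
		case ICE_ERR_CFG:
		case ICE_ERR_OUT_OF_RANGE:	return -EIO;
		case ICE_ERR_ALREADY_EXISTS:	return -EEXIST;
		case ICE_ERR_DOES_NOT_EXIST:	return -ENOENT;
		case ICE_ERR_IN_USE:		return -EBUSY;
		case ICE_ERR_AQ_FULL:		return -ENOSPC;
		}
		return -EIO;	/* placeholder fallback */
	}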
*/ -enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) +int ice_init_def_sw_recp(struct ice_hw *hw) { struct ice_sw_recipe *recps; u8 i; @@ -536,7 +536,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, sizeof(*recps), GFP_KERNEL); if (!recps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { recps[i].root_rid = i; @@ -576,14 +576,14 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) * in response buffer. The caller of this function to use *num_elems while * parsing the response buffer. */ -static enum ice_status +static int ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size, u16 *req_desc, u16 *num_elems, struct ice_sq_cd *cd) { struct ice_aqc_get_sw_cfg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); cmd = &desc.params.get_sw_conf; @@ -606,14 +606,14 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, * * Add a VSI context to the hardware (0x0210) */ -static enum ice_status +static int ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; res = &desc.params.add_update_free_vsi_res; @@ -650,14 +650,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware (0x0213) */ -static enum ice_status +static int ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -685,14 +685,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware (0x0211) */ -static enum ice_status +static int ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -832,15 +832,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw) * If this function gets called after reset for existing VSIs then update * with the new HW VSI number in the corresponding VSI handle list entry. 
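The hunk that follows converts ice_add_vsi(), whose interesting part is the unwind path: if the shadow context allocation fails after the VSI already exists in hardware, the hardware VSI is freed again and -ENOMEM (formerly ICE_ERR_NO_MEMORY) is returned. A toy model, with hw_add()/hw_free() standing in for ice_aq_add_vsi()/ice_aq_free_vsi():

	#include <errno.h>
	#include <stdlib.h>

	struct vsi_ctx { int vsi_num; };

	static int hw_add(struct vsi_ctx *ctx)   { ctx->vsi_num = 42; return 0; }
	static void hw_free(struct vsi_ctx *ctx) { ctx->vsi_num = -1; }

	static int add_vsi(struct vsi_ctx *ctx, struct vsi_ctx **shadow)
	{
		int status = hw_add(ctx);

		if (status)
			return status;

		*shadow = malloc(sizeof(**shadow));
		if (!*shadow) {
			hw_free(ctx);	/* undo the hardware side */
			return -ENOMEM;
		}
		**shadow = *ctx;	/* keep a software copy */
		return 0;
	}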
*/ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_vsi_ctx *tmp_vsi_ctx; - enum ice_status status; + int status; if (vsi_handle >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_aq_add_vsi(hw, vsi_ctx, cd); if (status) return status; @@ -851,7 +851,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, sizeof(*tmp_vsi_ctx), GFP_KERNEL); if (!tmp_vsi_ctx) { ice_aq_free_vsi(hw, vsi_ctx, false, cd); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } *tmp_vsi_ctx = *vsi_ctx; ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); @@ -873,14 +873,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware as well as from VSI handle list */ -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); if (!status) @@ -897,12 +897,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware */ -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); return ice_aq_update_vsi(hw, vsi_ctx, cd); } @@ -927,7 +927,7 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) else ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; - return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL)); + return ice_update_vsi(hw, vsi_handle, ctx, NULL); } /** @@ -939,20 +939,20 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) * * allocates or free a VSI list resource */ -static enum ice_status +static int ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { struct ice_aqc_alloc_free_res_elem *sw_buf; struct ice_aqc_res_elem *vsi_ele; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); if (lkup_type == ICE_SW_LKUP_MAC || @@ -966,7 +966,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_alloc_free_vsi_list_exit; } @@ -998,17 +998,17 @@ ice_aq_alloc_free_vsi_list_exit: * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -enum ice_status +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (opc != ice_aqc_opc_add_sw_rules && opc != ice_aqc_opc_update_sw_rules && opc != ice_aqc_opc_remove_sw_rules) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -1018,7 +1018,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, 
cd); if (opc != ice_aqc_opc_add_sw_rules && hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; return status; } @@ -1032,7 +1032,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, * * Add(0x0290) */ -static enum ice_status +static int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd) @@ -1069,18 +1069,18 @@ ice_aq_add_recipe(struct ice_hw *hw, * The caller must supply enough space in s_recipe_list to hold all possible * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. */ -static enum ice_status +static int ice_aq_get_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) { struct ice_aqc_add_get_recipe *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 buf_size; + int status; if (*num_recipes != ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.add_get_recipe; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); @@ -1104,7 +1104,7 @@ ice_aq_get_recipe(struct ice_hw *hw, * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ -static enum ice_status +static int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { @@ -1130,13 +1130,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ -static enum ice_status +static int ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.recipe_to_profile; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile); @@ -1154,16 +1154,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call */ -static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { struct ice_aqc_alloc_free_res_elem *sw_buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = kzalloc(buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << @@ -1230,7 +1230,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, * bookkeeping so that we have a current list of all the recipes that are * programmed in the firmware. 
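The start of this chunk shows the one firmware-specific translation ice_aq_sw_rules() keeps: when an update or remove command's last admin queue status says the entry does not exist, the result surfaces as -ENOENT (formerly ICE_ERR_DOES_NOT_EXIST); add commands are exempt. A distilled model (the rc values are illustrative, not the hardware encoding):

	#include <errno.h>
	#include <stdbool.h>

	enum aq_rc { AQ_RC_OK, AQ_RC_ENOENT };

	static int sw_rules_status(bool is_add, int send_status,
				   enum aq_rc last_rc)
	{
		int status = send_status;

		if (!is_add && last_rc == AQ_RC_ENOENT)
			status = -ENOENT;
		return status;
	}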
*/ -static enum ice_status +static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, bool *refresh_required) { @@ -1238,16 +1238,16 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, struct ice_aqc_recipe_data_elem *tmp; u16 num_recps = ICE_MAX_NUM_RECIPES; struct ice_prot_lkup_ext *lkup_exts; - enum ice_status status; u8 fv_word_idx = 0; u16 sub_recps; + int status; bitmap_zero(result_bm, ICE_MAX_FV_WORDS); /* we need a buffer big enough to accommodate all the recipes */ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp[0].recipe_indx = rid; status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); @@ -1284,7 +1284,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), GFP_KERNEL); if (!rg_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } @@ -1364,7 +1364,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), GFP_KERNEL); if (!recps[rid].root_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } @@ -1407,19 +1407,19 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, /* ice_get_initial_sw_cfg - Get initial port and default VSI data * @hw: pointer to the hardware structure */ -enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) +int ice_get_initial_sw_cfg(struct ice_hw *hw) { struct ice_aqc_get_sw_cfg_resp_elem *rbuf; - enum ice_status status; u16 req_desc = 0; u16 num_elems; + int status; u16 i; rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL); if (!rbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Multiple calls to ice_aq_get_sw_cfg may be required * to get all the switch configuration information. The need @@ -1670,7 +1670,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, * Create a large action to hold software marker and update the switch rule * entry pointed by m_ent with newly created large action */ -static enum ice_status +static int ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 sw_marker, u16 l_id) { @@ -1681,14 +1681,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, * 3. 
GENERIC VALUE action to hold the software marker ID */ const u16 num_lg_acts = 3; - enum ice_status status; u16 lg_act_size; u16 rules_size; + int status; u32 act; u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: @@ -1699,7 +1699,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); if (!lg_act) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); @@ -1808,19 +1808,19 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * Call AQ command to add a new switch rule or update existing switch rule * using the given VSI list ID */ -static enum ice_status +static int ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 s_rule_size; u16 rule_type; + int status; int i; if (!num_vsi) - return ICE_ERR_PARAM; + return -EINVAL; if (lkup_type == ICE_SW_LKUP_MAC || lkup_type == ICE_SW_LKUP_MAC_VLAN || @@ -1834,15 +1834,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; else - return ICE_ERR_PARAM; + return -EINVAL; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_vsi; i++) { if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit; } /* AQ call requires hw_vsi_id(s) */ @@ -1869,11 +1869,11 @@ exit: * @vsi_list_id: stores the ID of the VSI list to be created * @lkup_type: switch rule filter's lookup type */ -static enum ice_status +static int ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) { - enum ice_status status; + int status; status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, ice_aqc_opc_alloc_res); @@ -1895,7 +1895,7 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * to the corresponding filter management list to track this switch rule * and VSI mapping */ -static enum ice_status +static int ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { @@ -1903,16 +1903,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_aqc_sw_rules_elem *s_rule; enum ice_sw_lkup_type l_type; struct ice_sw_recipe *recp; - enum ice_status status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_pkt_fwd_rule_exit; } @@ -1959,16 +1959,16 @@ ice_create_pkt_fwd_rule_exit: * Call AQ command to update a previously created switch rule with a * VSI list ID */ -static enum ice_status +static int ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status 
status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); @@ -1988,13 +1988,13 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) * * Updates unicast switch filter rules based on VEB/VEPA mode */ -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ + int status = 0; rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; @@ -2044,24 +2044,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +static int ice_add_update_vsi_list(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_entry, struct ice_fltr_info *cur_fltr, struct ice_fltr_info *new_fltr) { - enum ice_status status = 0; u16 vsi_list_id = 0; + int status = 0; if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->fltr_act == ICE_FWD_TO_Q || new_fltr->fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->fltr_act == ICE_FWD_TO_VSI || cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -2073,7 +2073,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; vsi_handle_arr[1] = new_fltr->vsi_handle; @@ -2101,7 +2101,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, vsi_list_id); if (!m_entry->vsi_list_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list @@ -2116,7 +2116,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, enum ice_adminq_opc opcode; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) @@ -2209,7 +2209,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, * * Adds or updates the rule lists for a given recipe */ -static enum ice_status +static int ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_list_entry *f_entry) { @@ -2217,10 +2217,10 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2255,18 +2255,18 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, * The VSI list should be emptied before this function is called to remove the * VSI list. 
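The decision ladder of ice_add_update_vsi_list() above, distilled: forwarding actions that target a queue cannot be merged into a VSI list (-EOPNOTSUPP, formerly ICE_ERR_NOT_IMPL), and re-adding the same VSI is rejected with -EEXIST (formerly ICE_ERR_ALREADY_EXISTS). A self-contained sketch:

	#include <errno.h>

	enum fwd_act { FWD_TO_VSI, FWD_TO_VSI_LIST, FWD_TO_Q, FWD_TO_QGRP };

	static int merge_fltr(enum fwd_act cur, enum fwd_act new_act,
			      int cur_vsi, int new_vsi)
	{
		if (cur == FWD_TO_Q || cur == FWD_TO_QGRP)
			return -EOPNOTSUPP;
		if ((new_act == FWD_TO_Q || new_act == FWD_TO_QGRP) &&
		    (cur == FWD_TO_VSI || cur == FWD_TO_VSI_LIST))
			return -EOPNOTSUPP;
		if (cur_vsi == new_vsi)
			return -EEXIST;	/* rule already exists */
		return 0;	/* ok to fold the new VSI into a list */
	}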
*/ -static enum ice_status +static int ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 s_rule_size; + int status; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); @@ -2288,21 +2288,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, * @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_fltr_mgmt_list_entry *fm_list) { enum ice_sw_lkup_type lkup_type; - enum ice_status status = 0; u16 vsi_list_id; + int status = 0; if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = fm_list->fltr_info.lkup_type; vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; @@ -2324,7 +2324,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -2375,19 +2375,19 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * @recp_id: recipe ID for which the rule needs to removed * @f_entry: rule entry containing filter information */ -static enum ice_status +static int ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_list_entry *f_entry) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *list_elem; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; bool remove_rule = false; u16 vsi_handle; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2395,14 +2395,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, mutex_lock(rule_lock); list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); if (!list_elem) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { remove_rule = true; } else if (!list_elem->vsi_list_info) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } else if (list_elem->vsi_list_info->ref_cnt > 1) { /* a ref_cnt > 1 indicates that the vsi_list is being @@ -2435,7 +2435,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto exit; } @@ -2590,7 +2590,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle) * check for duplicates in this case, removing duplicates from a given * list should be taken care of in the caller of this function. 
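The ice_add_mac()/ice_add_vlan() conversions in the hunks that follow keep the per-entry status convention: each list entry records its own result, and the first failure aborts the walk with that errno. A toy version, with struct fltr standing in for struct ice_fltr_list_entry:

	#include <errno.h>
	#include <stdbool.h>

	struct fltr {
		bool valid_vsi;
		int status;
		struct fltr *next;
	};

	static int add_rule(const struct fltr *f)
	{
		return f->valid_vsi ? 0 : -EINVAL;
	}

	static int add_list(struct fltr *head)
	{
		if (!head)
			return -EINVAL;

		for (struct fltr *f = head; f; f = f->next) {
			f->status = add_rule(f);	/* kept on the entry */
			if (f->status)
				return f->status;	/* abort on first failure */
		}
		return 0;
	}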
*/ -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; @@ -2598,12 +2598,12 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) u16 total_elem_left, s_rule_size; struct ice_switch_info *sw; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; u16 num_unicast = 0; + int status = 0; u8 elem_sent; if (!m_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; s_rule = NULL; sw = hw->switch_info; @@ -2616,23 +2616,23 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) m_list_itr->fltr_info.flag = ICE_FLTR_TX; vsi_handle = m_list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; m_list_itr->fltr_info.src = hw_vsi_id; if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || is_zero_ether_addr(add)) - return ICE_ERR_PARAM; + return -EINVAL; if (is_unicast_ether_addr(add) && !hw->ucast_shared) { /* Don't overwrite the unicast address */ mutex_lock(rule_lock); if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, &m_list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; } mutex_unlock(rule_lock); num_unicast++; @@ -2660,7 +2660,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } @@ -2710,7 +2710,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } fm_entry->fltr_info = *f_info; @@ -2737,7 +2737,7 @@ ice_add_mac_exit: * @hw: pointer to the hardware structure * @f_entry: filter entry containing one VLAN information */ -static enum ice_status +static int ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { struct ice_switch_info *sw = hw->switch_info; @@ -2746,10 +2746,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) enum ice_sw_lkup_type lkup_type; u16 vsi_list_id = 0, vsi_handle; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2757,10 +2757,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) /* VLAN ID should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) - return ICE_ERR_PARAM; + return -EINVAL; if (new_fltr->src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; new_fltr->src = new_fltr->fwd_id.hw_vsi_id; lkup_type = new_fltr->lkup_type; @@ -2799,7 +2799,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); if (!v_list_itr) { - status = 
ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } /* reuse VSI list for new rule and increment ref_cnt */ @@ -2835,7 +2835,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) if (v_list_itr->vsi_count > 1 && v_list_itr->vsi_list_info->ref_cnt > 1) { ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); - status = ICE_ERR_CFG; + status = -EIO; goto exit; } @@ -2845,7 +2845,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) /* A rule already exists with the new VSI being added */ if (cur_handle == vsi_handle) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto exit; } @@ -2890,16 +2890,16 @@ exit: * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information */ -enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) +int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr; if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(v_list_itr, v_list, list_entry) { if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->fltr_info.flag = ICE_FLTR_TX; v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); if (v_list_itr->status) @@ -2917,13 +2917,12 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) * the filter list with the necessary fields (including flags to * indicate Tx or Rx rules). */ -enum ice_status -ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) { struct ice_fltr_list_entry *em_list_itr; if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(em_list_itr, em_list, list_entry) { enum ice_sw_lkup_type l_type = @@ -2931,7 +2930,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; em_list_itr->status = ice_add_rule_internal(hw, l_type, em_list_itr); @@ -2946,13 +2945,12 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) * @hw: pointer to the hardware structure * @em_list: list of ethertype or ethertype MAC entries */ -enum ice_status -ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) { struct ice_fltr_list_entry *em_list_itr, *tmp; if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { enum ice_sw_lkup_type l_type = @@ -2960,7 +2958,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; em_list_itr->status = ice_remove_rule_internal(hw, l_type, em_list_itr); @@ -3020,18 +3018,17 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head) * add filter rule to set/unset given VSI as default VSI for the switch * (represented by swid) */ -enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) +int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) { struct ice_aqc_sw_rules_elem *s_rule; struct ice_fltr_info f_info; enum ice_adminq_opc opcode; - enum ice_status status; u16 s_rule_size; u16 hw_vsi_id; + int status; if 
(!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : @@ -3039,7 +3036,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memset(&f_info, 0, sizeof(f_info)); @@ -3137,18 +3134,18 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, * This function removes either a MAC filter rule or a specific VSI from a * VSI list for a multicast MAC address. * - * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by - * ice_add_mac. Caller should be aware that this call will only work if all - * the entries passed into m_list were added previously. It will not attempt to - * do a partial remove of entries that were found. + * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should + * be aware that this call will only work if all the entries passed into m_list + * were added previously. It will not attempt to do a partial remove of entries + * that were found. */ -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_fltr_list_entry *list_itr, *tmp; struct mutex *rule_lock; /* Lock to protect filter rule list */ if (!m_list) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { @@ -3157,11 +3154,11 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; vsi_handle = list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_itr->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); @@ -3174,7 +3171,7 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, &list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } mutex_unlock(rule_lock); } @@ -3192,19 +3189,18 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information */ -enum ice_status -ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) +int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->status = ice_remove_rule_internal(hw, ICE_SW_LKUP_VLAN, v_list_itr); @@ -3242,7 +3238,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) * fltr_info.fwd_id fields. These are set such that later logic can * extract which VSI to remove the fltr from, and pass on that information. 
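The conversion running through these hunks is mechanical: every private ice_status code has a fixed errno counterpart, substituted by hand at each return site. A minimal standalone sketch of the correspondence as it appears in this file (the enum below is a local stand-in, not the driver's deleted ice_status.h definition):

#include <errno.h>

/* Local stand-in for the old driver enum; listing only the codes seen
 * in the switch code above.
 */
enum ice_status_sketch {
	ICE_ERR_PARAM,		/* bad argument */
	ICE_ERR_NO_MEMORY,	/* allocation failure */
	ICE_ERR_ALREADY_EXISTS,	/* duplicate filter */
	ICE_ERR_DOES_NOT_EXIST,	/* missing filter */
	ICE_ERR_CFG,		/* configuration/HW trouble */
	ICE_ERR_MAX_LIMIT,	/* resource exhausted */
	ICE_ERR_NOT_IMPL,	/* unsupported operation */
};

static int ice_status_to_errno_sketch(enum ice_status_sketch s)
{
	switch (s) {
	case ICE_ERR_PARAM:		return -EINVAL;
	case ICE_ERR_NO_MEMORY:		return -ENOMEM;
	case ICE_ERR_ALREADY_EXISTS:	return -EEXIST;
	case ICE_ERR_DOES_NOT_EXIST:	return -ENOENT;
	case ICE_ERR_CFG:		return -EIO;
	case ICE_ERR_MAX_LIMIT:		return -ENOSPC;
	case ICE_ERR_NOT_IMPL:		return -EOPNOTSUPP;
	}
	return -EIO;	/* anything unanticipated maps to -EIO */
}

Later hunks also fold ICE_ERR_BAD_PTR into -EINVAL and ICE_ERR_OUT_OF_RANGE into -EIO; the payoff is that callers can test standard errno values instead of driver-private codes.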
*/ -static enum ice_status +static int ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *vsi_list_head, struct ice_fltr_info *fi) @@ -3254,7 +3250,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, */ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp->fltr_info = *fi; @@ -3285,17 +3281,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, * Note that this means all entries in vsi_list_head must be explicitly * deallocated by the caller when done with list. */ -static enum ice_status +static int ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *lkup_list_head, struct list_head *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; + int status = 0; /* check to make sure VSI ID is valid and within boundary */ if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) @@ -3349,9 +3345,8 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) * @recp_id: recipe ID for which the rule needs to removed * @v_list: list of promisc entries */ -static enum ice_status -ice_remove_promisc(struct ice_hw *hw, u8 recp_id, - struct list_head *v_list) +static int +ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; @@ -3371,7 +3366,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id, * @promisc_mask: mask of promiscuous config bits to clear * @vid: VLAN ID to clear VLAN promiscuous */ -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { @@ -3381,11 +3376,11 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct ice_fltr_mgmt_list_entry *itr; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) recipe_id = ICE_SW_LKUP_PROMISC_VLAN; @@ -3444,20 +3439,20 @@ free_fltr_list: * @promisc_mask: mask of promiscuous config bits * @vid: VLAN ID to set VLAN promiscuous */ -enum ice_status +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; struct ice_fltr_list_entry f_list_entry; struct ice_fltr_info new_fltr; - enum ice_status status = 0; bool is_tx_fltr; + int status = 0; u16 hw_vsi_id; int pkt_type; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); memset(&new_fltr, 0, sizeof(new_fltr)); @@ -3558,7 +3553,7 @@ set_promisc_exit: * * Configure VSI with all associated VLANs to given promiscuous mode(s) */ -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc) { @@ -3567,8 +3562,8 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct list_head vsi_list_head; struct list_head *vlan_head; struct mutex *vlan_lock; /* Lock to protect filter rule list */ - enum ice_status status; u16 vlan_id; + int status; INIT_LIST_HEAD(&vsi_list_head); vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; @@ -3616,7 +3611,7 
@@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, struct list_head *rule_head; struct ice_fltr_list_entry *tmp; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status; + int status; INIT_LIST_HEAD(&remove_list_head); rule_lock = &sw->recp_list[lkup].filt_rule_lock; @@ -3681,19 +3676,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) * @num_items: number of entries requested for FD resource type * @counter_id: counter index returned by AQ call */ -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Allocate resource */ buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -3719,19 +3714,19 @@ exit: * @num_items: number of entries to be freed for FD resource type * @counter_id: counter ID resource which needs to be freed */ -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Free resource */ buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -3796,10 +3791,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * ice_find_recp - find a recipe * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match + * @tun_type: type of recipe tunnel * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ -static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) +static u16 +ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, + enum ice_sw_tunnel_type tun_type) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -3860,8 +3858,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) } /* If for "i"th recipe the found was never set to false * then it means we found our match + * Also tun type of recipe needs to be checked */ - if (found) + if (found && recp[i].tun_type == tun_type) return i; /* Return the recipe ID */ } } @@ -3937,7 +3936,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, * and start grouping them in 4-word groups. Each group makes up one * recipe. */ -static enum ice_status +static int ice_create_first_fit_recp_def(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, struct list_head *rg_list, @@ -3961,7 +3960,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, sizeof(*entry), GFP_KERNEL); if (!entry) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; list_add(&entry->l_entry, rg_list); grp = &entry->r_group; (*recp_cnt)++; @@ -3987,7 +3986,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. 
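ice_create_first_fit_recp_def() above packs the matched protocol/offset words into 4-word groups, one group per hardware recipe, and when more than one group results an extra recipe is needed to chain them together (see ice_add_sw_recipe() below). The arithmetic as a self-contained sketch; ICE_NUM_WORDS_RECIPE is assumed here to mirror the driver's per-recipe limit of four words:

#include <stdio.h>

#define ICE_NUM_WORDS_RECIPE 4	/* assumed per-recipe word capacity */

/* First-fit packing: groups of up to four words, plus one chaining
 * recipe when the match spans more than one group.
 */
static int recipes_for_words(int n_words)
{
	int groups = (n_words + ICE_NUM_WORDS_RECIPE - 1) / ICE_NUM_WORDS_RECIPE;

	return groups > 1 ? groups + 1 : groups;
}

int main(void)
{
	printf("%d\n", recipes_for_words(3));	/* 1: fits one recipe */
	printf("%d\n", recipes_for_words(9));	/* 4: three groups + chain */
	return 0;
}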
*/ -static enum ice_status +static int ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, struct list_head *rg_list) { @@ -4029,7 +4028,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, * invalid pair */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; } } @@ -4111,7 +4110,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, * @rm: recipe management list entry * @profiles: bitmap of profiles that will be associated. */ -static enum ice_status +static int ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, unsigned long *profiles) { @@ -4119,11 +4118,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, struct ice_aqc_recipe_data_elem *tmp; struct ice_aqc_recipe_data_elem *buf; struct ice_recp_grp_entry *entry; - enum ice_status status; u16 free_res_idx; u16 recipe_count; u8 chain_idx; u8 recps = 0; + int status; /* When more than one recipe are required, another recipe is needed to * chain them together. Matching a tunnel metadata ID takes up one of @@ -4139,22 +4138,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, if (rm->n_grp_count > 1) { if (rm->n_grp_count > free_res_idx) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; rm->n_grp_count++; } if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), GFP_KERNEL); if (!buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_mem; } @@ -4214,7 +4213,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, */ if (chain_idx >= ICE_MAX_FV_WORDS) { ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = ICE_ERR_MAX_LIMIT; + status = -ENOSPC; goto err_unroll; } @@ -4245,7 +4244,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[0].recipe_bitmap, rm->r_bitmap, sizeof(buf[0].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } /* Applicable only for ROOT_RECIPE, set the fwd_priority for @@ -4281,7 +4280,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, sizeof(*last_chain_entry), GFP_KERNEL); if (!last_chain_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } last_chain_entry->rid = rid; @@ -4316,7 +4315,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, sizeof(buf[recps].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } buf[recps].content.act_ctrl_fwd_priority = rm->priority; @@ -4350,7 +4349,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, } if (!idx_found) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto err_unroll; } @@ -4403,12 +4402,12 @@ err_mem: * @rm: recipe management list entry * @lkup_exts: lookup elements */ -static enum ice_status +static int ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, struct ice_prot_lkup_ext *lkup_exts) { - enum ice_status status; u8 recp_count = 0; + int status; rm->n_grp_count = 0; @@ -4438,21 +4437,21 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, * @bm: bitmap of field vectors to consider * @fv_list: pointer to a list that holds the returned field vectors */ -static enum ice_status +static int ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 
u16 lkups_cnt, unsigned long *bm, struct list_head *fv_list) { - enum ice_status status; u8 *prot_ids; + int status; u16 i; prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL); if (!prot_ids) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < lkups_cnt; i++) if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { - status = ICE_ERR_CFG; + status = -EIO; goto free_mem; } @@ -4489,7 +4488,7 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) * @rinfo: other information regarding the rule e.g. priority and action info * @lkup_exts: lookup word structure */ -static enum ice_status +static int ice_add_special_words(struct ice_adv_rule_info *rinfo, struct ice_prot_lkup_ext *lkup_exts) { @@ -4506,7 +4505,7 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo, lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; lkup_exts->field_mask[word] = mask; } else { - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } } @@ -4557,7 +4556,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, * @rinfo: other information regarding the rule e.g. priority and action info * @rid: return the recipe ID of the recipe created */ -static enum ice_status +static int ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) { @@ -4568,16 +4567,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_sw_fv_list_entry *fvit; struct ice_recp_grp_entry *r_tmp; struct ice_sw_fv_list_entry *tmp; - enum ice_status status = 0; struct ice_sw_recipe *rm; + int status = 0; u8 i; if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); if (!lkup_exts) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Determine the number of words to be matched and if it exceeds a * recipe's restrictions @@ -4586,20 +4585,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } count = ice_fill_valid_words(&lkups[i], lkup_exts); if (!count) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } } rm = kzalloc(sizeof(*rm), GFP_KERNEL); if (!rm) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_free_lkup_exts; } @@ -4651,11 +4650,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts); + *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); if (*rid < ICE_MAX_NUM_RECIPES) /* Success if found a recipe that match the existing criteria */ goto err_unroll; + rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ status = ice_add_sw_recipe(hw, rm, profiles); if (status) @@ -4844,7 +4844,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt_len: packet length of dummy packet * @offsets: offset info for the dummy packet */ -static enum ice_status +static int ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_aqc_sw_rules_elem *s_rule, const u8 *dummy_pkt, u16 pkt_len, @@ -4878,7 +4878,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } /* this should never happen in a correct calling sequence */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; switch (lkups[i].type) { case ICE_MAC_OFOS: @@ -4915,12 +4915,12 @@ 
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, len = sizeof(struct ice_udp_tnl_hdr); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* the length should be a word multiple */ if (len % ICE_BYTES_PER_WORD) - return ICE_ERR_CFG; + return -EIO; /* We have the offset to the header start, the length, the * caller's header values and mask. Use this information to @@ -4950,7 +4950,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt: dummy packet to fill in * @offsets: offset info for the dummy packet */ -static enum ice_status +static int ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) { @@ -4958,11 +4958,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, switch (tun_type) { case ICE_SW_TUN_VXLAN: + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN)) + return -EIO; + break; case ICE_SW_TUN_GENEVE: - if (!ice_get_open_tunnel_port(hw, &open_port)) - return ICE_ERR_CFG; + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE)) + return -EIO; break; - default: /* Nothing needs to be done for this tunnel type */ return 0; @@ -4982,7 +4984,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, } } - return ICE_ERR_CFG; + return -EIO; } /** @@ -5047,25 +5049,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +static int ice_adv_add_update_vsi_list(struct ice_hw *hw, struct ice_adv_fltr_mgmt_list_entry *m_entry, struct ice_adv_rule_info *cur_fltr, struct ice_adv_rule_info *new_fltr) { - enum ice_status status; u16 vsi_list_id = 0; + int status; if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -5078,7 +5080,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->sw_act.fwd_id.hw_vsi_id == new_fltr->sw_act.fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; @@ -5111,7 +5113,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle = new_fltr->sw_act.vsi_handle; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) @@ -5153,7 +5155,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, * rinfo describes other information related to this rule such as forwarding * IDs, priority of this rule, etc. 
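With this change, ice_fill_adv_packet_tun() resolves the open tunnel port per tunnel type (separate ice_get_open_tunnel_port() lookups for TNL_VXLAN and TNL_GENEVE) and fails with -EIO when none is open, instead of sharing one lookup. The final step is a byte-order-correct store of that port into the dummy packet's UDP header; a rough illustration, with the offset handling and names invented for the sketch:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons(); the kernel uses cpu_to_be16() */

/* Patch an open VXLAN/GENEVE destination port into a prebuilt dummy
 * packet.  udp_hdr_off is assumed to come from the packet's offset
 * table, as in the driver's ice_dummy_pkt_offsets lists.
 */
static void fill_tunnel_dport(uint8_t *pkt, size_t udp_hdr_off,
			      uint16_t open_port)
{
	uint16_t dport = htons(open_port);

	/* destination port is the second 16-bit field of the UDP header */
	memcpy(pkt + udp_hdr_off + 2, &dport, sizeof(dport));
}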
*/ -enum ice_status +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry) @@ -5164,10 +5166,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_aqc_sw_rules_elem *s_rule = NULL; struct list_head *rule_head; struct ice_switch_info *sw; - enum ice_status status; const u8 *pkt = NULL; u16 word_cnt; u32 act = 0; + int status; u8 q_rgn; /* Initialize profile to result index bitmap */ @@ -5177,7 +5179,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; /* get # of words we need to match */ word_cnt = 0; @@ -5191,13 +5193,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) - return ICE_ERR_PARAM; + return -EINVAL; /* make sure that we can locate a dummy packet */ ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, &pkt_offsets); if (!pkt) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_ice_add_adv_rule; } @@ -5205,11 +5207,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) - return ICE_ERR_CFG; + return -EIO; vsi_handle = rinfo->sw_act.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) rinfo->sw_act.fwd_id.hw_vsi_id = @@ -5243,7 +5245,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; if (!rinfo->flags_info.act_valid) { act |= ICE_SINGLE_ACT_LAN_ENABLE; act |= ICE_SINGLE_ACT_LB_ENABLE; @@ -5277,7 +5279,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ICE_SINGLE_ACT_VALID_BIT; break; default: - status = ICE_ERR_CFG; + status = -EIO; goto err_ice_add_adv_rule; } @@ -5322,14 +5324,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, sizeof(struct ice_adv_fltr_mgmt_list_entry), GFP_KERNEL); if (!adv_fltr) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, lkups_cnt * sizeof(*lkups), GFP_KERNEL); if (!adv_fltr->lkups) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } @@ -5372,12 +5374,12 @@ err_ice_add_adv_rule: * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. * It is required to pass valid VSI handle. 
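ice_add_adv_rule() begins by counting how many 16-bit words of the lookup masks are actually set; zero words, or more than ICE_MAX_CHAIN_WORDS, is rejected with -EINVAL before anything is allocated. A standalone sketch of that gate (the constant value and mask layout are assumptions for illustration):

#include <stddef.h>
#include <stdint.h>
#include <errno.h>

#define MAX_CHAIN_WORDS 32	/* stand-in for ICE_MAX_CHAIN_WORDS */

static int count_match_words(const uint16_t *mask, size_t n_words, int *out)
{
	int cnt = 0;
	size_t i;

	/* every 16-bit chunk with any mask bit set costs one recipe word */
	for (i = 0; i < n_words; i++)
		if (mask[i])
			cnt++;

	if (!cnt || cnt > MAX_CHAIN_WORDS)
		return -EINVAL;	/* mirrors the driver's parameter check */

	*out = cnt;
	return 0;
}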
*/ -static enum ice_status +static int ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, struct list_head *list_head) { struct ice_fltr_mgmt_list_entry *itr; - enum ice_status status = 0; + int status = 0; u16 hw_vsi_id; if (list_empty(list_head)) @@ -5426,22 +5428,22 @@ end: * @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_adv_fltr_mgmt_list_entry *fm_list) { struct ice_vsi_list_map_info *vsi_list_info; enum ice_sw_lkup_type lkup_type; - enum ice_status status; u16 vsi_list_id; + int status; if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = ICE_SW_LKUP_LAST; vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; @@ -5461,7 +5463,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -5525,27 +5527,27 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * header. rinfo describes other information related to this rule such as * forwarding IDs, priority of this rule, etc. */ -static enum ice_status +static int ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo) { struct ice_adv_fltr_mgmt_list_entry *list_elem; struct ice_prot_lkup_ext lkup_exts; - enum ice_status status = 0; bool remove_rule = false; struct mutex *rule_lock; /* Lock to protect filter rule list */ u16 i, rid, vsi_handle; + int status = 0; memset(&lkup_exts, 0, sizeof(lkup_exts)); for (i = 0; i < lkups_cnt; i++) { u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) - return ICE_ERR_CFG; + return -EIO; count = ice_fill_valid_words(&lkups[i], &lkup_exts); if (!count) - return ICE_ERR_CFG; + return -EIO; } /* Create any special protocol/offset pairs, such as looking at tunnel @@ -5555,10 +5557,10 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) return status; - rid = ice_find_recp(hw, &lkup_exts); + rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); @@ -5590,7 +5592,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; s_rule->pdata.lkup_tx_rx.act = 0; s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(list_elem->rule_info.fltr_rule_id); @@ -5598,7 +5600,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, rule_buf_sz, 1, ice_aqc_opc_remove_sw_rules, NULL); - if (!status || status == ICE_ERR_DOES_NOT_EXIST) { + if (!status || status == -ENOENT) { struct ice_switch_info *sw = hw->switch_info; 
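The rewritten condition just above is the behavioral subtlety of this hunk: after the admin-queue delete, success and -ENOENT (the firmware has already dropped the rule) are treated alike, so the software list entry is unlinked either way. Restated as a tiny standalone predicate:

#include <errno.h>
#include <stdbool.h>

/* Mirrors the check in ice_rem_adv_rule(): a rule that is already gone
 * on the firmware side must still be removed from the driver's list.
 */
static bool should_unlink_sw_entry(int aq_status)
{
	return aq_status == 0 || aq_status == -ENOENT;
}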
mutex_lock(rule_lock); @@ -5623,7 +5625,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * the remove_entry parameter. This function will remove rule for a given * vsi_handle with a given rule_id which is passed as parameter in remove_entry */ -enum ice_status +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry) { @@ -5634,7 +5636,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, sw = hw->switch_info; if (!sw->recp_list[remove_entry->rid].recp_created) - return ICE_ERR_PARAM; + return -EINVAL; list_head = &sw->recp_list[remove_entry->rid].filt_rules; list_for_each_entry(list_itr, list_head, list_entry) { if (list_itr->rule_info.fltr_rule_id == @@ -5646,7 +5648,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, } } /* either list is empty or unable to find rule */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -5656,10 +5658,10 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, * * Replays filters for requested VSI via vsi_handle. */ -enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) { struct ice_switch_info *sw = hw->switch_info; - enum ice_status status = 0; + int status = 0; u8 i; for (i = 0; i < ICE_SW_LKUP_LAST; i++) { diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index d8a38906f16f..d8334beaaa8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -256,7 +256,7 @@ struct ice_vsi_list_map_info { struct ice_fltr_list_entry { struct list_head list_entry; - enum ice_status status; + int status; struct ice_fltr_info fltr_info; }; @@ -302,75 +302,69 @@ enum ice_promisc_flags { }; /* VSI related commands */ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd); -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); void ice_clear_all_vsi_ctx(struct ice_hw *hw); /* Switch config */ -enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); +int ice_get_initial_sw_cfg(struct ice_hw *hw); -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id); -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); /* Switch/bridge related commands */ -enum ice_status +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry); -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); -enum ice_status -ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); -enum ice_status -ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); -int -ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw); +int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); +int ice_remove_vlan(struct ice_hw 
*hw, struct list_head *v_list); +int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle); bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle); +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); -enum ice_status -ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); -enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); /* Promisc/defport setup for VSIs */ -enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); -enum ice_status +int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); -enum ice_status -ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry); -enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); +int ice_init_def_sw_recp(struct ice_hw *hw); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); +int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); -enum ice_status +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index e5d23feb6701..e8aab664270a 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner) return inner ? 
ICE_IPV6_IL : ICE_IPV6_OFOS; } -static enum ice_protocol_type -ice_proto_type_from_l4_port(bool inner, u16 ip_proto) +static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto) { - if (inner) { - switch (ip_proto) { - case IPPROTO_UDP: - return ICE_UDP_ILOS; - } - } else { - switch (ip_proto) { - case IPPROTO_TCP: - return ICE_TCP_IL; - case IPPROTO_UDP: - return ICE_UDP_OF; - } + switch (ip_proto) { + case IPPROTO_TCP: + return ICE_TCP_IL; + case IPPROTO_UDP: + return ICE_UDP_ILOS; } return 0; @@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } - if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) { - list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto); + if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) && + hdr->l3_key.ip_proto == IPPROTO_UDP) { + list[i].type = ICE_UDP_OF; list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port; list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port; i++; @@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { struct ice_tc_l4_hdr *l4_key, *l4_mask; - list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto); + list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto); l4_key = &headers->l4_key; l4_mask = &headers->l4_mask; @@ -405,9 +398,8 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) struct ice_hw *hw = &vsi->back->hw; struct ice_adv_lkup_elem *list; u32 flags = fltr->flags; - enum ice_status status; int lkups_cnt; - int ret = 0; + int ret; int i; if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) { @@ -456,14 +448,13 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) /* specify the cookie as filter_rule_id */ rule_info.fltr_rule_id = fltr->cookie; - status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); - if (status == ICE_ERR_ALREADY_EXISTS) { + ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (ret == -EEXIST) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists"); ret = -EINVAL; goto exit; - } else if (status) { + } else if (ret) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); - ret = -EIO; goto exit; } @@ -802,7 +793,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, headers->l3_mask.ttl = match.mask->ttl; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) && + fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) { struct flow_match_ports match; flow_rule_match_enc_ports(rule, &match); @@ -1168,7 +1160,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_rem.vsi_handle = fltr->dest_id; err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); if (err) { - if (err == ICE_ERR_DOES_NOT_EXIST) { + if (err == -ENOENT) { NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist"); return -ENOENT; } diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 9e0c2923c62e..b8644adfba78 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -7,7 +7,6 @@ #define ICE_BYTES_PER_WORD 2 #define ICE_BYTES_PER_DWORD 4 -#include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_osdep.h" #include "ice_controlq.h" @@ -873,8 +872,6 @@ struct ice_hw { u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; - enum
ice_aq_err pkg_dwnld_status; - /* Driver's package ver - (from the Ice Metadata section) */ struct ice_pkg_ver pkg_ver; u8 pkg_name[ICE_PKG_NAME_SIZE]; @@ -919,6 +916,7 @@ struct ice_hw { struct mutex rss_locks; /* protect RSS configuration */ struct list_head rss_list_head; struct ice_mbx_snapshot mbx_snapshot; + DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX); u16 io_expander_handle; }; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index eee180d8c024..d64df81d4893 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -47,197 +47,6 @@ struct virtchnl_fdir_fltr_conf { u32 flow_id; }; -static enum virtchnl_proto_hdr_type vc_pattern_ether[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_TCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_SCTP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_TCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_SCTP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_GTPU_IP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_GTPU_IP, - VIRTCHNL_PROTO_HDR_GTPU_EH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_L2TPV3, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_L2TPV3, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_AH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum 
virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_AH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_PFCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_PFCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -struct virtchnl_fdir_pattern_match_item { - enum virtchnl_proto_hdr_type *list; - u64 input_set; - u64 *meta; -}; - -static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = { - {vc_pattern_ipv4, 0, NULL}, - {vc_pattern_ipv4_tcp, 0, NULL}, - {vc_pattern_ipv4_udp, 0, NULL}, - {vc_pattern_ipv4_sctp, 0, NULL}, - {vc_pattern_ipv6, 0, NULL}, - {vc_pattern_ipv6_tcp, 0, NULL}, - {vc_pattern_ipv6_udp, 0, NULL}, - {vc_pattern_ipv6_sctp, 0, NULL}, -}; - -static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = { - {vc_pattern_ipv4, 0, NULL}, - {vc_pattern_ipv4_tcp, 0, NULL}, - {vc_pattern_ipv4_udp, 0, NULL}, - {vc_pattern_ipv4_sctp, 0, NULL}, - {vc_pattern_ipv6, 0, NULL}, - {vc_pattern_ipv6_tcp, 0, NULL}, - {vc_pattern_ipv6_udp, 0, NULL}, - {vc_pattern_ipv6_sctp, 0, NULL}, - {vc_pattern_ether, 0, NULL}, - {vc_pattern_ipv4_gtpu, 0, NULL}, - {vc_pattern_ipv4_gtpu_eh, 0, NULL}, - {vc_pattern_ipv4_l2tpv3, 0, NULL}, - {vc_pattern_ipv6_l2tpv3, 0, NULL}, - {vc_pattern_ipv4_esp, 0, NULL}, - {vc_pattern_ipv6_esp, 0, NULL}, - {vc_pattern_ipv4_ah, 0, NULL}, - {vc_pattern_ipv6_ah, 0, NULL}, - {vc_pattern_ipv4_nat_t_esp, 0, NULL}, - {vc_pattern_ipv6_nat_t_esp, 0, NULL}, - {vc_pattern_ipv4_pfcp, 0, NULL}, - {vc_pattern_ipv6_pfcp, 0, NULL}, -}; - struct virtchnl_fdir_inset_map { enum virtchnl_proto_hdr_field field; enum ice_flow_field fld; @@ -751,7 +560,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, struct ice_flow_seg_info *old_seg; struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *vf_prof; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -794,29 +602,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, tun ? 
ICE_FLTR_PTYPE_MAX : 0); - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - tun + 1, &prof); - ret = ice_status_to_errno(status); + ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + tun + 1, &prof); if (ret) { dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", flow, vf->vf_id); goto err_exit; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry1_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n", flow, vf->vf_id); goto err_prof; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry2_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x Ctrl VSI entry for VF %d\n", @@ -911,83 +716,6 @@ err_exit: } /** - * ice_vc_fdir_match_pattern - * @fltr: virtual channel add cmd buffer - * @type: virtual channel protocol filter header type - * - * Matching the header type by comparing fltr and type's value. - * - * Return: true on success, and false on error. - */ -static bool -ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr, - enum virtchnl_proto_hdr_type *type) -{ - struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; - int i = 0; - - while ((i < proto->count) && - (*type == proto->proto_hdr[i].type) && - (*type != VIRTCHNL_PROTO_HDR_NONE)) { - type++; - i++; - } - - return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE)); -} - -/** - * ice_vc_fdir_get_pattern - get while list pattern - * @vf: pointer to the VF info - * @len: filter list length - * - * Return: pointer to allowed filter list - */ -static const struct virtchnl_fdir_pattern_match_item * -ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len) -{ - const struct virtchnl_fdir_pattern_match_item *item; - struct ice_pf *pf = vf->pf; - struct ice_hw *hw; - - hw = &pf->hw; - if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", - sizeof(hw->active_pkg_name))) { - item = vc_fdir_pattern_comms; - *len = ARRAY_SIZE(vc_fdir_pattern_comms); - } else { - item = vc_fdir_pattern_os; - *len = ARRAY_SIZE(vc_fdir_pattern_os); - } - - return item; -} - -/** - * ice_vc_fdir_search_pattern - * @vf: pointer to the VF info - * @fltr: virtual channel add cmd buffer - * - * Search for matched pattern from supported pattern list - * - * Return: 0 on success, and other on error. 
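The deleted matcher walked a whitelist template and the rule's header list in lockstep until a mismatch or the NONE terminator; this series replaces it with the ptype-based ice_vc_validate_pattern() added later in the patch. For reference, the removed walk reduces to the following shape (the enum and names are standalone stand-ins):

#include <stdbool.h>

enum hdr { HDR_NONE, HDR_ETH, HDR_IPV4, HDR_UDP };

/* Advance through the rule's headers and the NONE-terminated template
 * in lockstep; a match consumes the whole rule and lands exactly on
 * the template's terminator.
 */
static bool match_template(const enum hdr *rule, int count,
			   const enum hdr *tmpl)
{
	int i = 0;

	while (i < count && tmpl[i] != HDR_NONE && rule[i] == tmpl[i])
		i++;

	return i == count && tmpl[i] == HDR_NONE;
}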
- */ -static int -ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr) -{ - const struct virtchnl_fdir_pattern_match_item *pattern; - int len, i; - - pattern = ice_vc_fdir_get_pattern(vf, &len); - - for (i = 0; i < len; i++) - if (ice_vc_fdir_match_pattern(fltr, pattern[i].list)) - return 0; - - return -EINVAL; -} - -/** * ice_vc_fdir_parse_pattern * @vf: pointer to the VF info * @fltr: virtual channel add cmd buffer @@ -1299,11 +1027,11 @@ static int ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, struct virtchnl_fdir_fltr_conf *conf) { + struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; int ret; - ret = ice_vc_fdir_search_pattern(vf, fltr); - if (ret) - return ret; + if (!ice_vc_validate_pattern(vf, proto)) + return -EINVAL; ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); if (ret) @@ -1467,7 +1195,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, struct ice_fdir_fltr *input = &conf->input; struct ice_vsi *vsi, *ctrl_vsi; struct ice_fltr_desc desc; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -1497,8 +1224,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, return -ENOMEM; ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - ret = ice_status_to_errno(status); + ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); if (ret) { dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", vf->vf_id, input->flow_type); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 217ff5e9a6f1..61b2db3342ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -9,6 +9,7 @@ #include "ice_flow.h" #include "ice_eswitch.h" #include "ice_virtchnl_allowlist.h" +#include "ice_flex_pipe.h" #define FIELD_SELECTOR(proto_hdr_field) \ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK) @@ -18,18 +19,7 @@ struct ice_vc_hdr_match_type { u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */ }; -static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = { - {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE}, - {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 | - ICE_FLOW_SEG_HDR_IPV_OTHER}, - {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 | - ICE_FLOW_SEG_HDR_IPV_OTHER}, - {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP}, - {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP}, - {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP}, -}; - -static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = { +static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = { {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE}, {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH}, {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN}, @@ -67,83 +57,7 @@ struct ice_vc_hash_field_match_type { }; static const struct -ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = { - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), - ICE_FLOW_HASH_IPV4}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | - 
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), - ICE_FLOW_HASH_IPV6}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), - ICE_FLOW_HASH_TCP_PORT}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), - ICE_FLOW_HASH_UDP_PORT}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), - ICE_FLOW_HASH_SCTP_PORT}, -}; - -static const struct -ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = { +ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC), BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)}, {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), @@ -289,37 +203,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) } /** - * ice_err_to_virt_err - translate errors for VF return code - * @ice_err: error return code - */ -static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) -{ - switch 
(ice_err) { - case ICE_SUCCESS: - return VIRTCHNL_STATUS_SUCCESS; - case ICE_ERR_BAD_PTR: - case ICE_ERR_INVAL_SIZE: - case ICE_ERR_DEVICE_NOT_SUPPORTED: - case ICE_ERR_PARAM: - case ICE_ERR_CFG: - return VIRTCHNL_STATUS_ERR_PARAM; - case ICE_ERR_NO_MEMORY: - return VIRTCHNL_STATUS_ERR_NO_MEMORY; - case ICE_ERR_NOT_READY: - case ICE_ERR_RESET_FAILED: - case ICE_ERR_FW_API_VER: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_FULL: - case ICE_ERR_AQ_NO_WORK: - case ICE_ERR_AQ_EMPTY: - return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; - default: - return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - } -} - -/** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure * @v_opcode: operation code @@ -770,8 +653,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) struct ice_hw *hw = &vsi->back->hw; struct ice_aqc_vsi_props *info; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -794,12 +676,10 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -968,8 +848,8 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) { struct device *dev = ice_pf_to_dev(vf->pf); struct ice_vsi *vsi = ice_get_vf_vsi(vf); - enum ice_status status; u8 broadcast[ETH_ALEN]; + int status; if (ice_is_eswitch_mode_switchdev(vf->pf)) return 0; @@ -977,9 +857,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) eth_broadcast_addr(broadcast); status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); if (status) { - dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n", - vf->vf_id, ice_stat_str(status)); - return ice_status_to_errno(status); + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", + vf->vf_id, status); + return status; } vf->num_mac++; @@ -988,10 +868,10 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); if (status) { - dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n", + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", &vf->hw_lan_addr.addr[0], vf->vf_id, - ice_stat_str(status)); - return ice_status_to_errno(status); + status); + return status; } vf->num_mac++; @@ -1341,45 +1221,50 @@ static void ice_clear_vf_reset_trigger(struct ice_vf *vf) ice_flush(hw); } -/** - * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s) - * @vf: pointer to the VF info - * @vsi: the VSI being configured - * @promisc_m: mask of promiscuous config bits - * @rm_promisc: promisc flag request from the VF to remove or add filter - * - * This function configures VF VSI promiscuous mode, based on the VF requests, - * for Unicast, Multicast and VLAN - */ -static enum ice_status -ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, - bool rm_promisc) +static int 
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) { - struct ice_pf *pf = vf->pf; - enum ice_status status = 0; - struct ice_hw *hw; + struct ice_hw *hw = &vsi->back->hw; + int status; - hw = &pf->hw; - if (vsi->num_vlan) { - status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, - rm_promisc); - } else if (vf->port_vlan_info) { - if (rm_promisc) - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_info); - else - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_info); - } else { - if (rm_promisc) - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - else - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - 0); + if (vf->port_vlan_info) + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_info & VLAN_VID_MASK); + else if (vsi->num_vlan > 1) + status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); + + if (status && status != -EEXIST) { + dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; } - return status; + return 0; +} + +static int +ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) +{ + struct ice_hw *hw = &vsi->back->hw; + int status; + + if (vf->port_vlan_info) + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_info & VLAN_VID_MASK); + else if (vsi->num_vlan > 1) + status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); + + if (status && status != -ENOENT) { + dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; } static void ice_vf_clear_counters(struct ice_vf *vf) @@ -1415,8 +1300,8 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; if (!vsi->agg_node) return; @@ -1617,6 +1502,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vc_set_default_allowlist(vf); ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VFs since it should be * setup only when VF creates its first FDIR rule. */ @@ -1742,11 +1628,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) else promisc_m = ICE_UCAST_PROMISC_BITS; - if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) + if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m)) dev_err(dev, "disabling promiscuous mode failed\n"); } ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VF since it should be setup * only when VF creates its first FDIR rule. 
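Both new helpers pick the filter scope the same way: a configured port VLAN restricts promiscuous filters to that VLAN ID (masked with VLAN_VID_MASK), more than one VLAN on the VSI fans the request out over its VLAN list, and otherwise VLAN 0 covers untagged traffic. On top of that, -EEXIST on set and -ENOENT on clear are tolerated as already-done. The scope decision restated with hypothetical names:

enum promisc_scope {
	SCOPE_PORT_VLAN,	/* single VLAN from the port VLAN config */
	SCOPE_ALL_VLANS,	/* fan out over every VLAN on the VSI */
	SCOPE_UNTAGGED,		/* VLAN ID 0 */
};

static enum promisc_scope pick_promisc_scope(unsigned int port_vlan_info,
					     int num_vlan)
{
	if (port_vlan_info)
		return SCOPE_PORT_VLAN;
	if (num_vlan > 1)
		return SCOPE_ALL_VLANS;
	return SCOPE_UNTAGGED;
}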
*/ @@ -1844,7 +1731,6 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; u8 broadcast[ETH_ALEN]; - enum ice_status status; struct ice_vsi *vsi; struct device *dev; int err; @@ -1864,11 +1750,10 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) } eth_broadcast_addr(broadcast); - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n", - vf->vf_id, ice_stat_str(status)); - err = ice_status_to_errno(status); + err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (err) { + dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", + vf->vf_id, err); goto release_vsi; } @@ -2021,6 +1906,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) if (ret) goto err_unroll_sriov; + /* rearm global interrupts */ + if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + return 0; err_unroll_sriov: @@ -2110,7 +1999,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; int err; err = ice_check_sriov_allowed(pf); @@ -2130,9 +2018,9 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) return -EBUSY; } - status = ice_mbx_init_snapshot(&pf->hw, num_vfs); - if (status) - return ice_status_to_errno(status); + err = ice_mbx_init_snapshot(&pf->hw, num_vfs); + if (err) + return err; err = ice_pci_sriov_ena(pf, num_vfs); if (err) { @@ -2266,9 +2154,9 @@ int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { - enum ice_status aq_ret; struct device *dev; struct ice_pf *pf; + int aq_ret; if (!vf) return -EINVAL; @@ -2300,8 +2188,8 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { - dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n", - vf->vf_id, ice_stat_str(aq_ret), + dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n", + vf->vf_id, aq_ret, ice_aq_str(pf->hw.mailboxq.sq_last_status)); return -EIO; } @@ -2550,6 +2438,100 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len) } /** + * ice_vc_validate_pattern + * @vf: pointer to the VF info + * @proto: virtchnl protocol headers + * + * validate whether the pattern is supported. + * + * Return: true on success, false on error.
+ */ +bool +ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) +{ + bool is_ipv4 = false; + bool is_ipv6 = false; + bool is_udp = false; + u16 ptype = -1; + int i = 0; + + while (i < proto->count && + proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) { + switch (proto->proto_hdr[i].type) { + case VIRTCHNL_PROTO_HDR_ETH: + ptype = ICE_PTYPE_MAC_PAY; + break; + case VIRTCHNL_PROTO_HDR_IPV4: + ptype = ICE_PTYPE_IPV4_PAY; + is_ipv4 = true; + break; + case VIRTCHNL_PROTO_HDR_IPV6: + ptype = ICE_PTYPE_IPV6_PAY; + is_ipv6 = true; + break; + case VIRTCHNL_PROTO_HDR_UDP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_UDP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_UDP_PAY; + is_udp = true; + break; + case VIRTCHNL_PROTO_HDR_TCP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_TCP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_TCP_PAY; + break; + case VIRTCHNL_PROTO_HDR_SCTP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_SCTP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_SCTP_PAY; + break; + case VIRTCHNL_PROTO_HDR_GTPU_IP: + case VIRTCHNL_PROTO_HDR_GTPU_EH: + if (is_ipv4) + ptype = ICE_MAC_IPV4_GTPU; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_GTPU; + goto out; + case VIRTCHNL_PROTO_HDR_L2TPV3: + if (is_ipv4) + ptype = ICE_MAC_IPV4_L2TPV3; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_L2TPV3; + goto out; + case VIRTCHNL_PROTO_HDR_ESP: + if (is_ipv4) + ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP : + ICE_MAC_IPV4_ESP; + else if (is_ipv6) + ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP : + ICE_MAC_IPV6_ESP; + goto out; + case VIRTCHNL_PROTO_HDR_AH: + if (is_ipv4) + ptype = ICE_MAC_IPV4_AH; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_AH; + goto out; + case VIRTCHNL_PROTO_HDR_PFCP: + if (is_ipv4) + ptype = ICE_MAC_IPV4_PFCP_SESSION; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_PFCP_SESSION; + goto out; + default: + break; + } + i++; + } + +out: + return ice_hw_ptype_ena(&vf->pf->hw, ptype); +} + +/** * ice_vc_parse_rss_cfg - parses hash fields and headers from * a specific virtchnl RSS cfg * @hw: pointer to the hardware @@ -2572,18 +2554,10 @@ ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; - if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", - sizeof(hw->active_pkg_name))) { - hf_list = ice_vc_hash_field_list_comms; - hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms); - hdr_list = ice_vc_hdr_list_comms; - hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms); - } else { - hf_list = ice_vc_hash_field_list_os; - hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os); - hdr_list = ice_vc_hdr_list_os; - hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os); - } + hf_list = ice_vc_hash_field_list; + hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); + hdr_list = ice_vc_hdr_list; + hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list); for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { struct virtchnl_proto_hdr *proto_hdr = @@ -2685,10 +2659,15 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) goto error_param; } + if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { struct ice_vsi_ctx *ctx; - enum ice_status status; u8 lut_type, hash_type; + int status; lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; hash_type = add ? 
ICE_AQ_VSI_Q_OPT_RSS_XOR : @@ -2717,9 +2696,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) status = ice_update_vsi(hw, vsi->idx, ctx, NULL); if (status) { - dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; } else { vsi->info.q_opt_rss = ctx->info.q_opt_rss; @@ -2744,19 +2722,18 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) vsi->vsi_num, v_ret); } } else { - enum ice_status status; + int status; status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, addl_hdrs); - /* We just ignore ICE_ERR_DOES_NOT_EXIST, because - * if two configurations share the same profile remove - * one of them actually removes both, since the - * profile is deleted. + /* We just ignore -ENOENT, because if two configurations + * share the same profile, removing one of them actually + * removes both, since the profile is deleted. */ - if (status && status != ICE_ERR_DOES_NOT_EXIST) { + if (status && status != -ENOENT) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n", - vf->vf_id, ice_stat_str(status)); + dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", + vf->vf_id, status); } } } @@ -2914,7 +2891,6 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) struct ice_pf *pf = np->vsi->back; struct ice_vsi_ctx *ctx; struct ice_vsi *vf_vsi; - enum ice_status status; struct device *dev; struct ice_vf *vf; int ret; @@ -2964,12 +2940,10 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)); } - status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); - if (status) { - dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n", - ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, - ice_stat_str(status)); - ret = -EIO; + ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); + if (ret) { + dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %d\n", + ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret); goto out; } @@ -3015,10 +2989,10 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf) static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - enum ice_status mcast_status = 0, ucast_status = 0; bool rm_promisc, alluni = false, allmulti = false; struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; + int mcast_err = 0, ucast_err = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; struct device *dev; @@ -3100,24 +3074,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) ucast_m = ICE_UCAST_PROMISC_BITS; } - ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m, - !alluni); - if (ucast_status) { - dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n", - alluni ? "en" : "dis", vf->vf_id); - v_ret = ice_err_to_virt_err(ucast_status); - } + if (alluni) + ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m); + else + ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); - mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m, - !allmulti); - if (mcast_status) { - dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n", - allmulti ?
"en" : "dis", vf->vf_id); - v_ret = ice_err_to_virt_err(mcast_status); - } + if (allmulti) + mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); + else + mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); + + if (ucast_err || mcast_err) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } - if (!mcast_status) { + if (!mcast_err) { if (allmulti && !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", @@ -3127,7 +3098,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) vf->vf_id); } - if (!ucast_status) { + if (!ucast_err) { if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id); @@ -3807,8 +3778,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; - int ret = 0; + int ret; /* device MAC already added */ if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) @@ -3819,18 +3789,17 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, return -EPERM; } - status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) { + ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); + if (ret == -EEXIST) { dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, vf->vf_id); /* don't return since we might need to update * the primary MAC in ice_vfhw_mac_add() below */ - ret = -EEXIST; - } else if (status) { - dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); - return -EIO; + } else if (ret) { + dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", + mac_addr, vf->vf_id, ret); + return ret; } else { vf->num_mac++; } @@ -3907,20 +3876,20 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; + int status; if (!ice_can_vf_change_mac(vf) && ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) return 0; status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_DOES_NOT_EXIST) { + if (status == -ENOENT) { dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, vf->vf_id); return -ENOENT; } else if (status) { - dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); + dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", + mac_addr, vf->vf_id, status); return -EIO; } @@ -5283,9 +5252,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, s16 vf_id = le16_to_cpu(event->desc.retval); struct device *dev = ice_pf_to_dev(pf); struct ice_mbx_data mbxdata; - enum ice_status status; bool malvf = false; struct ice_vf *vf; + int status; if (ice_validate_vf_id(pf, vf_id)) return false; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 7e28ecbbe7af..752487a1bdd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -203,6 +203,8 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +bool +ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf 
*vf); int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index ff55cb415b11..bb9a80847298 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) while (i--) { dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); + rx_desc->wb.status_error0 = 0; rx_desc++; xdp++; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0011b15e678c..0ac4cc5eaa2d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1015,10 +1015,6 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, bool is_l2 = false; u32 regval; - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 8e448288ee26..142c57b7a451 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1718,24 +1718,26 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring, static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - union igc_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(va - IGC_SKB_PAD, truesize); + skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, IGC_SKB_PAD); + skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); igc_rx_buffer_flip(rx_buffer, truesize); return skb; @@ -1746,6 +1748,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct xdp_buff *xdp, ktime_t timestamp) { + unsigned int metasize = xdp->data - xdp->data_meta; unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); void *va = xdp->data; @@ -1753,10 +1756,11 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); if (unlikely(!skb)) return NULL; @@ -1769,7 +1773,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } /* update all 
of the pointers */ size -= headlen; @@ -2354,7 +2364,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) if (!skb) { xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), - igc_rx_offset(rx_ring) + pkt_offset, size, false); + igc_rx_offset(rx_ring) + pkt_offset, + size, true); skb = igc_xdp_run_prog(adapter, &xdp); } @@ -2378,7 +2389,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) } else if (skb) igc_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) - skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); + skb = igc_build_skb(rx_ring, rx_buffer, &xdp); else skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, timestamp); @@ -2448,8 +2459,10 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); - if (metasize) + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } return skb; } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 30568e3544cd..71813fa8f928 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -560,10 +560,6 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, struct hwtstamp_config *config) { - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: igc_ptp_disable_tx_timestamp(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 23ddfd79fc8b..336426a67ac1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -992,10 +992,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, bool is_l2 = false; u32 regval; - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2368ae3f0e10..e3c5d64ba340 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3948,7 +3948,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII mode receives the state from the PHY */ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; - new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_clk = MVNETA_GMAC_1MS_CLOCK_ENABLE; new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | MVNETA_GMAC_FORCE_LINK_PASS | MVNETA_GMAC_CONFIG_MII_SPEED | @@ -3960,7 +3960,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, } else { /* 802.3z negotiation - only 1000base-X */ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; - new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_clk = MVNETA_GMAC_1MS_CLOCK_ENABLE; new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | MVNETA_GMAC_FORCE_LINK_PASS | MVNETA_GMAC_CONFIG_MII_SPEED)) | @@ -3973,6 +3973,10 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; } + /* Set the 1ms clock divisor */ + if (new_clk == MVNETA_GMAC_1MS_CLOCK_ENABLE) + new_clk |= clk_get_rate(pp->clk) / 1000; + /* Armada 
370 documentation says we can only change the port mode * and in-band enable when the link is down, so force it down * while making these changes. We also do this for GMAC_CTRL2 @@ -5271,6 +5275,7 @@ static int mvneta_probe(struct platform_device *pdev) pp->phylink_config.dev = &dev->dev; pp->phylink_config.type = PHYLINK_NETDEV; + pp->phylink_config.legacy_pre_march2020 = true; pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index a48e804c46f2..b1cce4425296 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -2963,11 +2963,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); if (priv->percpu_pools) { - err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_free_dma; - err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_unregister_rxq_short; @@ -5142,9 +5142,6 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) - return -EINVAL; - if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -7454,7 +7451,7 @@ static int mvpp2_probe(struct platform_device *pdev) shared = num_present_cpus() - priv->nthreads; if (shared > 0) - bitmap_fill(&priv->lock_map, + bitmap_set(&priv->lock_map, 0, min_t(int, shared, MVPP2_MAX_THREADS)); for (i = 0; i < MVPP2_MAX_THREADS; i++) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index cb56e171ddd4..3ca6b942ebe2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto free_regions; break; default: - return err; + goto free_regions; } mw->mbox_wq = alloc_workqueue(name, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 1333edf1c361..6080ebd9bd94 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2002,10 +2002,6 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: otx2_config_hw_tx_tstamp(pfvf, false); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 0ef68fdd1f26..61c20907315f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -5,6 +5,8 @@ * */ +#include <linux/module.h> + #include "otx2_common.h" #include "otx2_ptp.h" diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index da0b6525ef9a..fc7f2fedafd7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ 
b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -419,11 +419,8 @@ struct prestera_acl_rule_entry * prestera_acl_rule_entry_find(struct prestera_acl *acl, struct prestera_acl_rule_entry_key *key) { - struct prestera_acl_rule_entry *e; - - e = rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key, - __prestera_acl_rule_entry_ht_params); - return IS_ERR(e) ? NULL : e; + return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key, + __prestera_acl_rule_entry_ht_params); } static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index 92cb5e9099c6..6282c9822e2b 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -443,7 +443,7 @@ struct prestera_msg_counter_resp { __le32 offset; __le32 num_counters; __le32 done; - struct prestera_msg_counter_stats stats[0]; + struct prestera_msg_counter_stats stats[]; }; struct prestera_msg_span_req { @@ -1900,7 +1900,7 @@ int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, .block_id = __cpu_to_le32(idx), .num_counters = __cpu_to_le32(*len), }; - size_t size = sizeof(*resp) + sizeof(*resp->stats) * (*len); + size_t size = struct_size(resp, stats, *len); int err, i; resp = kmalloc(size, GFP_KERNEL); diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index c357c193378e..86d356b4388d 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config NET_VENDOR_MEDIATEK bool "MediaTek devices" - depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 + depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST help If you have a Mediatek SoC with ethernet, say Y. 
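Editor's aside on the prestera_hw.c hunks above (a hedged sketch, not part of the patch): replacing the zero-length array `stats[0]` with a C99 flexible array member `stats[]` lets the compiler and fortify checks see the real array bound, and struct_size(resp, stats, *len) from <linux/overflow.h> computes the same header-plus-elements size while saturating on multiplication overflow instead of wrapping. A minimal standalone illustration of the allocation pattern, using hypothetical stand-in types rather than the real prestera structures:

	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical stand-ins for the prestera message structs. */
	struct counter_stats {
		unsigned long long packets;
		unsigned long long bytes;
	};

	struct counter_resp {
		unsigned int num_counters;
		struct counter_stats stats[];	/* flexible array member */
	};

	static struct counter_resp *alloc_resp(unsigned int len)
	{
		/* Same arithmetic struct_size(resp, stats, len) performs in
		 * the kernel: the fixed header plus len trailing elements
		 * (the kernel helper additionally saturates on overflow).
		 */
		size_t size = sizeof(struct counter_resp) +
			      (size_t)len * sizeof(struct counter_stats);
		struct counter_resp *resp = malloc(size);

		if (resp) {
			memset(resp, 0, size);
			resp->num_counters = len;
		}
		return resp;
	}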
@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK config NET_MEDIATEK_SOC tristate "MediaTek SoC Gigabit Ethernet support" depends on NET_DSA || !NET_DSA + select PINCTRL select PHYLINK select DIMLIB help diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index de4152e2e3e4..a068cf5c970f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2923,6 +2923,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->phylink_config.dev = &eth->netdev[id]->dev; mac->phylink_config.type = PHYLINK_NETDEV; + /* This driver makes use of state->speed/state->duplex in + * mac_config + */ + mac->phylink_config.legacy_pre_march2020 = true; mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e97d97e94231..ed5038d98ef6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void) MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000, - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000, @@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void) MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000, - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000, - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000, ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3f6d5c384637..ad1e4caf48bf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, bool carry_xdp_prog) { struct bpf_prog *xdp_prog; - int i, t; + int i, t, ret; - mlx4_en_copy_priv(tmp, priv, prof); + ret = mlx4_en_copy_priv(tmp, priv, prof); + if (ret) { + en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n", + __func__); + return ret; + } if (mlx4_en_alloc_resources(tmp)) { en_warn(priv, @@ -2422,10 +2427,6 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - /* device doesn't support time stamping */ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 92056452a9e3..4ba1a78c6515 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -115,6 +115,7 @@ config MLX5_TC_CT config MLX5_TC_SAMPLE bool
"MLX5 TC sample offload support" depends on MLX5_CLS_ACT + depends on PSAMPLE=y || PSAMPLE=n || MLX5_CORE=m default y help Say Y here if you want to support offloading sample rules via tc diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index e63bb9ceb9c0..e592e0955c71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -46,6 +46,15 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \ en/tc/post_act.o en/tc/int_port.o + +mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \ + en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \ + en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \ + en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \ + en/tc/act/mirred.o en/tc/act/mirred_nic.o \ + en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \ + en/tc/act/redirect_ingress.o + mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 8eaa24d865c5..a46284ca5172 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DEALLOC_SF: case MLX5_CMD_OP_DESTROY_UCTX: case MLX5_CMD_OP_DESTROY_UMEM: + case MLX5_CMD_OP_MODIFY_RQT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_TIS: case MLX5_CMD_OP_QUERY_TIS: case MLX5_CMD_OP_CREATE_RQT: - case MLX5_CMD_OP_MODIFY_RQT: case MLX5_CMD_OP_QUERY_RQT: case MLX5_CMD_OP_CREATE_FLOW_TABLE: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 48b12ee44b8d..e77c4159713f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -173,7 +173,7 @@ struct page_pool; #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT) -#define MLX5E_MAX_KLM_PER_WQE(mdev) \ +#define MLX5E_MAX_KLM_PER_WQE \ MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE) #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -1057,7 +1057,6 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, mlx5e_fp_preactivate preactivate, void *context, bool reset); int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv); -int mlx5e_num_channels_changed(struct mlx5e_priv *priv); int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context); void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index f8c29022dbb2..66180ffb4606 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -717,7 +717,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev, int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); u32 wqebbs; - max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev); + max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE; max_hd_per_wqe = 
mlx5e_shampo_hd_per_wqe(mdev, params, rq_param); max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr; rest = max_hd_per_wqe % max_klm_per_umr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index 142953847996..0015a81eb9a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -13,6 +13,9 @@ struct mlx5e_rx_res { unsigned int max_nch; u32 drop_rqn; + struct mlx5e_packet_merge_param pkt_merge_param; + struct rw_semaphore pkt_merge_param_sem; + struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; bool rss_active; u32 rss_rqns[MLX5E_INDIR_RQT_SIZE]; @@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res) if (err) goto out; + /* Separated from the channels RQs, does not share pkt_merge state with them */ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, mlx5e_rqt_get_rqtn(&res->ptp.rqt), inner_ft_support); @@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, res->max_nch = max_nch; res->drop_rqn = drop_rqn; + res->pkt_merge_param = *init_pkt_merge_param; + init_rwsem(&res->pkt_merge_param_sem); + err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch); if (err) goto err_out; @@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res) return mlx5e_tir_get_tirn(&res->ptp.tir); } -u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) +static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) { return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt); } @@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, if (!builder) return -ENOMEM; + down_write(&res->pkt_merge_param_sem); + res->pkt_merge_param = *pkt_merge_param; + mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param); final_err = 0; @@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, } } + up_write(&res->pkt_merge_param_sem); mlx5e_tir_builder_free(builder); return final_err; } @@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res * { return mlx5e_rss_get_hash(res->rss[0]); } + +int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq, + struct mlx5e_tir *tir) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_tir_builder *builder; + u32 rqtn; + int err; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq); + + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn, + inner_ft_support); + mlx5e_tir_builder_build_direct(builder); + mlx5e_tir_builder_build_tls(builder); + down_read(&res->pkt_merge_param_sem); + mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); + err = mlx5e_tir_init(tir, builder, res->mdev, false); + up_read(&res->pkt_merge_param_sem); + + mlx5e_tir_builder_free(builder); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index d09f7d174a51..b39b20a720e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum 
mlx5_traffic_types tt); u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); -/* RQTN getters for modules that create their own TIRs */ -u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix); - /* Activate/deactivate API */ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs); void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res); @@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx); /* Workaround for hairpin */ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res); +/* Accel TIRs */ +int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq, + struct mlx5e_tir *tir); #endif /* __MLX5_EN_RX_RES_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c new file mode 100644 index 000000000000..b0de6b999675 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_accept = { + .can_offload = tc_act_can_offload_accept, + .parse_action = tc_act_parse_accept, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c new file mode 100644 index 000000000000..e600924e30ea --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" +#include "mlx5_core.h" + +/* Must be aligned with enum flow_action_id. */ +static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = { + &mlx5e_tc_act_accept, + &mlx5e_tc_act_drop, + &mlx5e_tc_act_trap, + &mlx5e_tc_act_goto, + &mlx5e_tc_act_mirred, + &mlx5e_tc_act_mirred, + &mlx5e_tc_act_redirect_ingress, + NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ + &mlx5e_tc_act_vlan, + &mlx5e_tc_act_vlan, + &mlx5e_tc_act_vlan_mangle, + &mlx5e_tc_act_tun_encap, + &mlx5e_tc_act_tun_decap, + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_csum, + NULL, /* FLOW_ACTION_MARK, */ + &mlx5e_tc_act_ptype, + NULL, /* FLOW_ACTION_PRIORITY, */ + NULL, /* FLOW_ACTION_WAKE, */ + NULL, /* FLOW_ACTION_QUEUE, */ + &mlx5e_tc_act_sample, + NULL, /* FLOW_ACTION_POLICE, */ + &mlx5e_tc_act_ct, + NULL, /* FLOW_ACTION_CT_METADATA, */ + &mlx5e_tc_act_mpls_push, + &mlx5e_tc_act_mpls_pop, +}; + +/* Must be aligned with enum flow_action_id. 
*/ +static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = { + &mlx5e_tc_act_accept, + &mlx5e_tc_act_drop, + NULL, /* FLOW_ACTION_TRAP, */ + &mlx5e_tc_act_goto, + &mlx5e_tc_act_mirred_nic, + NULL, /* FLOW_ACTION_MIRRED, */ + NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */ + NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ + NULL, /* FLOW_ACTION_VLAN_PUSH, */ + NULL, /* FLOW_ACTION_VLAN_POP, */ + NULL, /* FLOW_ACTION_VLAN_MANGLE, */ + NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */ + NULL, /* FLOW_ACTION_TUNNEL_DECAP, */ + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_csum, + &mlx5e_tc_act_mark, + NULL, /* FLOW_ACTION_PTYPE, */ + NULL, /* FLOW_ACTION_PRIORITY, */ + NULL, /* FLOW_ACTION_WAKE, */ + NULL, /* FLOW_ACTION_QUEUE, */ + NULL, /* FLOW_ACTION_SAMPLE, */ + NULL, /* FLOW_ACTION_POLICE, */ + &mlx5e_tc_act_ct, +}; + +/** + * mlx5e_tc_act_get() - Get an action parser for an action id. + * @act_id: Flow action id. + * @ns_type: flow namespace type. + */ +struct mlx5e_tc_act * +mlx5e_tc_act_get(enum flow_action_id act_id, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5e_tc_act **tc_acts; + + tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic; + + return tc_acts[act_id]; +} + +/** + * mlx5e_tc_act_init_parse_state() - Init a new parse_state. + * @parse_state: Parsing state. + * @flow: mlx5e tc flow being handled. + * @flow_action: flow action to parse. + * @extack: to set an error msg. + * + * The same parse_state should be passed to action parsers + * for tracking the current parsing state. + */ +void +mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_tc_flow *flow, + struct flow_action *flow_action, + struct netlink_ext_ack *extack) +{ + memset(parse_state, 0, sizeof(*parse_state)); + parse_state->flow = flow; + parse_state->num_actions = flow_action->num_entries; + parse_state->extack = extack; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h new file mode 100644 index 000000000000..26efa33de56f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef __MLX5_EN_TC_ACT_H__ +#define __MLX5_EN_TC_ACT_H__ + +#include <net/tc_act/tc_pedit.h> +#include <net/flow_offload.h> +#include <linux/netlink.h> +#include "eswitch.h" +#include "pedit.h" + +struct mlx5_flow_attr; + +struct mlx5e_tc_act_parse_state { + unsigned int num_actions; + struct mlx5e_tc_flow *flow; + struct netlink_ext_ack *extack; + bool encap; + bool decap; + bool mpls_push; + bool ptype_host; + const struct ip_tunnel_info *tun_info; + struct pedit_headers_action hdrs[__PEDIT_CMD_MAX]; + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; + int if_count; + struct mlx5_tc_ct_priv *ct_priv; +}; + +struct mlx5e_tc_act { + bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index); + + int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr); + + int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr); +}; + +extern struct mlx5e_tc_act mlx5e_tc_act_drop; +extern struct mlx5e_tc_act mlx5e_tc_act_trap; +extern struct mlx5e_tc_act mlx5e_tc_act_accept; +extern struct mlx5e_tc_act mlx5e_tc_act_mark; +extern struct mlx5e_tc_act mlx5e_tc_act_goto; +extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap; +extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap; +extern struct mlx5e_tc_act mlx5e_tc_act_csum; +extern struct mlx5e_tc_act mlx5e_tc_act_pedit; +extern struct mlx5e_tc_act mlx5e_tc_act_vlan; +extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle; +extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push; +extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop; +extern struct mlx5e_tc_act mlx5e_tc_act_mirred; +extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic; +extern struct mlx5e_tc_act mlx5e_tc_act_ct; +extern struct mlx5e_tc_act mlx5e_tc_act_sample; +extern struct mlx5e_tc_act mlx5e_tc_act_ptype; +extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress; + +struct mlx5e_tc_act * +mlx5e_tc_act_get(enum flow_action_id act_id, + enum mlx5_flow_namespace_type ns_type); + +void +mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_tc_flow *flow, + struct flow_action *flow_action, + struct netlink_ext_ack *extack); + +#endif /* __MLX5_EN_TC_ACT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c new file mode 100644 index 000000000000..29920ef0180a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ +#include <linux/tc_act/tc_csum.h> +#include "act.h" +#include "en/tc_priv.h" + +static bool +csum_offload_supported(struct mlx5e_priv *priv, + u32 action, + u32 update_flags, + struct netlink_ext_ack *extack) +{ + u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | + TCA_CSUM_UPDATE_FLAG_UDP; + + /* The HW recalcs checksums only if re-writing headers */ + if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { + NL_SET_ERR_MSG_MOD(extack, + "TC csum action is only offloaded with pedit"); + netdev_warn(priv->netdev, + "TC csum action is only offloaded with pedit\n"); + return false; + } + + if (update_flags & ~prot_flags) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload TC csum action for some header/s"); + netdev_warn(priv->netdev, + "can't offload TC csum action for some header/s - flags %#x\n", + update_flags); + return false; + } + + return true; +} + +static bool +tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct mlx5e_tc_flow *flow = parse_state->flow; + + return csum_offload_supported(flow->priv, flow->attr->action, + act->csum_flags, parse_state->extack); +} + +static int +tc_act_parse_csum(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_csum = { + .can_offload = tc_act_can_offload_csum, + .parse_action = tc_act_parse_csum, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c new file mode 100644 index 000000000000..06ec30cdb269 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" +#include "en/tc_ct.h" + +static bool +tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (flow_flag_test(parse_state->flow, SAMPLE)) { + NL_SET_ERR_MSG_MOD(extack, + "Sample action with connection tracking is not supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + int err; + + err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr, + &attr->parse_attr->mod_hdr_acts, + act, parse_state->extack); + if (err) + return err; + + flow_flag_set(parse_state->flow, CT); + + if (mlx5e_is_eswitch_flow(parse_state->flow)) + attr->esw_attr->split_count = attr->esw_attr->out_count; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_ct = { + .can_offload = tc_act_can_offload_ct, + .parse_action = tc_act_parse_ct, +}; + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c new file mode 100644 index 000000000000..2e29a23bed12 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_drop = { + .can_offload = tc_act_can_offload_drop, + .parse_action = tc_act_parse_drop, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c new file mode 100644 index 000000000000..f44515061228 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" +#include "eswitch.h" + +static int +validate_goto_chain(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + bool is_esw = mlx5e_is_eswitch_flow(flow); + bool ft_flow = mlx5e_is_ft_flow(flow); + u32 dest_chain = act->chain_index; + struct mlx5_fs_chains *chains; + struct mlx5_eswitch *esw; + u32 reformat_and_fwd; + u32 max_chain; + + esw = priv->mdev->priv.eswitch; + chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv); + max_chain = mlx5_chains_get_chain_range(chains); + reformat_and_fwd = is_esw ? + MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) : + MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table); + + if (ft_flow) { + NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); + return -EOPNOTSUPP; + } + + if (!mlx5_chains_backwards_supported(chains) && + dest_chain <= flow->attr->chain) { + NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported"); + return -EOPNOTSUPP; + } + + if (dest_chain > max_chain) { + NL_SET_ERR_MSG_MOD(extack, + "Requested destination chain is out of supported range"); + return -EOPNOTSUPP; + } + + if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | + MLX5_FLOW_CONTEXT_ACTION_DECAP) && + !reformat_and_fwd) { + NL_SET_ERR_MSG_MOD(extack, + "Goto chain is not allowed if action has reformat or decap"); + return -EOPNOTSUPP; + } + + return 0; +} + +static bool +tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + + if (validate_goto_chain(flow->priv, flow, act, extack)) + return false; + + return true; +} + +static int +tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->dest_chain = act->chain_index; + + return 0; +} + +static int +tc_act_post_parse_goto(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + + if (!attr->dest_chain) + return 0; + + if 
(parse_state->decap) { + /* It can be supported if we'll create a mapping for + * the tunnel device only (without tunnel), and set + * this tunnel id with this decap flow. + * + * On restore (miss), we'll just set this saved tunnel + * device. + */ + + NL_SET_ERR_MSG_MOD(extack, "Decap with goto isn't supported"); + netdev_warn(priv->netdev, "Decap with goto isn't supported"); + return -EOPNOTSUPP; + } + + if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) { + NL_SET_ERR_MSG_MOD(extack, "Mirroring goto chain rules isn't supported"); + return -EOPNOTSUPP; + } + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_goto = { + .can_offload = tc_act_can_offload_goto, + .parse_action = tc_act_parse_goto, + .post_parse = tc_act_post_parse_goto, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c new file mode 100644 index 000000000000..d775c3d9edf3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en_tc.h" + +static bool +tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) { + NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_mark(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->nic_attr->flow_tag = act->mark; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mark = { + .can_offload = tc_act_can_offload_mark, + .parse_action = tc_act_parse_mark, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c new file mode 100644 index 000000000000..a0832b86016c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include <linux/if_macvlan.h> +#include <linux/if_vlan.h> +#include <net/bareudp.h> +#include <net/bonding.h> +#include "act.h" +#include "vlan.h" +#include "en/tc_tun_encap.h" +#include "en/tc_priv.h" +#include "en_rep.h" + +static bool +same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev) +{ + return mlx5e_eswitch_vf_rep(priv->netdev) && + priv->netdev == out_dev; +} + +static int +verify_uplink_forwarding(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct net_device *out_dev, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *rep_priv; + + /* Forwarding non-encapsulated traffic between + * uplink ports is allowed only if + * termination_table_raw_traffic cap is set. + * + * The input vport was stored in attr->in_rep. + * In LAG case, *priv* is the private data of + * uplink, which may not be the input vport.
+ */ + rep_priv = mlx5e_rep_to_rep_priv(attr->esw_attr->in_rep); + + if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) && + mlx5e_eswitch_uplink_rep(out_dev))) + return 0; + + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, + termination_table_raw_traffic)) { + NL_SET_ERR_MSG_MOD(extack, + "devices are both uplink, can't offload forwarding"); + pr_err("devices %s %s are both uplink, can't offload forwarding\n", + priv->netdev->name, out_dev->name); + return -EOPNOTSUPP; + } else if (out_dev != rep_priv->netdev) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not the same uplink, can't offload forwarding"); + pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n", + priv->netdev->name, out_dev->name); + return -EOPNOTSUPP; + } + return 0; +} + +static bool +is_duplicated_output_device(struct net_device *dev, + struct net_device *out_dev, + int *ifindexes, int if_count, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < if_count; i++) { + if (ifindexes[i] == out_dev->ifindex) { + NL_SET_ERR_MSG_MOD(extack, "can't duplicate output to same device"); + netdev_err(dev, "can't duplicate output to same device: %s\n", + out_dev->name); + return true; + } + } + + return false; +} + +static struct net_device * +get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev) +{ + struct net_device *fdb_out_dev = out_dev; + struct net_device *uplink_upper; + + rcu_read_lock(); + uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); + if (uplink_upper && netif_is_lag_master(uplink_upper) && + uplink_upper == out_dev) { + fdb_out_dev = uplink_dev; + } else if (netif_is_lag_master(out_dev)) { + fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev)); + if (fdb_out_dev && + (!mlx5e_eswitch_rep(fdb_out_dev) || + !netdev_port_same_parent_id(fdb_out_dev, uplink_dev))) + fdb_out_dev = NULL; + } + rcu_read_unlock(); + return fdb_out_dev; +} + +static bool +tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct net_device *out_dev = act->dev; + struct mlx5e_priv *priv = flow->priv; + struct mlx5_esw_flow_attr *esw_attr; + + parse_attr = flow->attr->parse_attr; + esw_attr = flow->attr->esw_attr; + + if (!out_dev) { + /* out_dev is NULL when filters with + * non-existing mirred device are replayed to + * the driver. + */ + return false; + } + + if (parse_state->mpls_push && !netif_is_bareudp(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, "mpls is supported only through a bareudp device"); + return false; + } + + if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) { + /* Ignore forward to self rules generated + * by adding both mlx5 devs to the flow table + * block on a normal nft offload setup. + */ + return false; + } + + if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + NL_SET_ERR_MSG_MOD(extack, + "can't support more output ports, can't offload forwarding"); + netdev_warn(priv->netdev, + "can't support more than %d output ports, can't offload forwarding\n", + esw_attr->out_count); + return false; + } + + if (parse_state->encap || + netdev_port_same_parent_id(priv->netdev, out_dev) || + netif_is_ovs_master(out_dev)) + return true; + + if (parse_attr->filter_dev != priv->netdev) { + /* All mlx5 devices are called to configure + * high level device filters. 
Therefore, the + * *attempt* to install a filter on invalid + * eswitch should not trigger an explicit error + */ + return false; + } + + NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding"); + netdev_warn(priv->netdev, + "devices %s %s not on same switch HW, can't offload forwarding\n", + netdev_name(priv->netdev), + out_dev->name); + + return false; +} + +static int +parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *out_dev = act->dev; + + parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex; + parse_attr->tun_info[esw_attr->out_count] = + mlx5e_dup_tun_info(parse_state->tun_info); + + if (!parse_attr->tun_info[esw_attr->out_count]) + return -ENOMEM; + + parse_state->encap = false; + esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; + esw_attr->out_count++; + /* attr->dests[].rep is resolved when we handle encap */ + + return 0; +} + +static int +parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct net_device *out_dev = act->dev; + struct net_device *uplink_dev; + struct mlx5e_priv *out_priv; + struct mlx5_eswitch *esw; + int *ifindexes; + int if_count; + int err; + + esw = priv->mdev->priv.eswitch; + uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + ifindexes = parse_state->ifindexes; + if_count = parse_state->if_count; + + if (is_duplicated_output_device(priv->netdev, out_dev, ifindexes, if_count, extack)) + return -EOPNOTSUPP; + + parse_state->ifindexes[if_count] = out_dev->ifindex; + parse_state->if_count++; + + out_dev = get_fdb_out_dev(uplink_dev, out_dev); + if (!out_dev) + return -ENODEV; + + if (is_vlan_dev(out_dev)) { + err = mlx5e_tc_act_vlan_add_push_action(priv, attr, &out_dev, extack); + if (err) + return err; + } + + if (is_vlan_dev(parse_attr->filter_dev)) { + err = mlx5e_tc_act_vlan_add_pop_action(priv, attr, extack); + if (err) + return err; + } + + if (netif_is_macvlan(out_dev)) + out_dev = macvlan_dev_real_dev(out_dev); + + err = verify_uplink_forwarding(priv, attr, out_dev, extack); + if (err) + return err; + + if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); + return -EOPNOTSUPP; + } + + if (same_vf_reps(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, "can't forward from a VF to itself"); + return -EOPNOTSUPP; + } + + out_priv = netdev_priv(out_dev); + rpriv = out_priv->ppriv; + esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; + esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; + esw_attr->out_count++; + + return 0; +} + +static int +parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *out_dev = act->dev; + int err; + + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, + MLX5E_TC_INT_PORT_EGRESS, + 
&attr->action, esw_attr->out_count); + if (err) + return err; + + esw_attr->out_count++; + return 0; +} + +static int +tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct net_device *out_dev = act->dev; + int err = -EOPNOTSUPP; + + if (parse_state->encap) + err = parse_mirred_encap(parse_state, act, attr); + else if (netdev_port_same_parent_id(priv->netdev, out_dev)) + err = parse_mirred(parse_state, act, priv, attr); + else if (netif_is_ovs_master(out_dev)) + err = parse_mirred_ovs_master(parse_state, act, priv, attr); + + if (err) + return err; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mirred = { + .can_offload = tc_act_can_offload_mirred, + .parse_action = tc_act_parse_mirred, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c new file mode 100644 index 000000000000..2c74567b6d25 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct net_device *out_dev = act->dev; + struct mlx5e_priv *priv = flow->priv; + + if (act->id != FLOW_ACTION_REDIRECT) + return false; + + if (priv->netdev->netdev_ops != out_dev->netdev_ops || + !mlx5e_same_hw_devs(priv, netdev_priv(out_dev))) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); + netdev_warn(priv->netdev, + "devices %s %s not on same switch HW, can't offload forwarding\n", + netdev_name(priv->netdev), + out_dev->name); + return false; + } + + return true; +} + +static int +tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex; + flow_flag_set(parse_state->flow, HAIRPIN); + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = { + .can_offload = tc_act_can_offload_mirred_nic, + .parse_action = tc_act_parse_mirred_nic, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c new file mode 100644 index 000000000000..784fc4f68b1e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
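The mirred handlers above also guard against duplicated destinations: parse_mirred() records each output ifindex in the shared parse state, and is_duplicated_output_device() rejects a second mirred action toward the same device before any attribute is touched. Below is a minimal userspace sketch of that check, assuming a flat ifindex array; the names and return convention are illustrative stand-ins, not the driver's API.

#include <stdio.h>
#include <stdbool.h>

/* Sketch: record every mirred destination's ifindex and refuse a
 * repeat before parsing it, mirroring is_duplicated_output_device().
 * The array bound and names are hypothetical. */
static bool is_duplicated_output(const int *ifindexes, int if_count, int out_ifindex)
{
	int i;

	for (i = 0; i < if_count; i++)
		if (ifindexes[i] == out_ifindex)
			return true;
	return false;
}

int main(void)
{
	int seen[8];
	int count = 0;
	int dests[] = { 5, 7, 5 };	/* third output repeats ifindex 5 */
	int i;

	for (i = 0; i < 3; i++) {
		if (is_duplicated_output(seen, count, dests[i])) {
			printf("reject: duplicate output ifindex %d\n", dests[i]);
			return 1;
		}
		seen[count++] = dests[i];
	}
	printf("all outputs unique\n");
	return 0;
}

Rejecting duplicates at parse time keeps esw_attr->dests[] free of aliases, so out_count can be trusted later when split_count is derived from it.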
+ +#include <net/bareudp.h> +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_priv *priv = parse_state->flow->priv; + + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l2_to_l3_tunnel) || + act->mpls_push.proto != htons(ETH_P_MPLS_UC)) { + NL_SET_ERR_MSG_MOD(extack, "mpls push is supported only for mpls_uc protocol"); + return false; + } + + return true; +} + +static int +tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + parse_state->mpls_push = true; + + return 0; +} + +static bool +tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct net_device *filter_dev; + + filter_dev = flow->attr->parse_attr->filter_dev; + + /* we only support mpls pop if it is the first action + * and the filter net device is bareudp. Subsequent + * actions can be pedit and the last can be mirred + * egress redirect. + */ + if (act_index) { + NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action"); + return false; + } + + if (!netif_is_bareudp(filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices"); + return false; + } + + return true; +} + +static int +tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->parse_attr->eth.h_proto = act->mpls_pop.proto; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_flag_set(parse_state->flow, L3_TO_L2_DECAP); + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mpls_push = { + .can_offload = tc_act_can_offload_mpls_push, + .parse_action = tc_act_parse_mpls_push, +}; + +struct mlx5e_tc_act mlx5e_tc_act_mpls_pop = { + .can_offload = tc_act_can_offload_mpls_pop, + .parse_action = tc_act_parse_mpls_pop, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c new file mode 100644 index 000000000000..79addbbef087 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
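tc_act_can_offload_mpls_pop() above enforces a position-dependent contract rather than a per-action capability: the pop must be the first action in the rule and the filter must be attached to a bareudp device, with pedit and a final mirred redirect as the only permitted followers. Here is a small sketch of those two gates, assuming a boolean flag for the bareudp test; the context struct is hypothetical.

#include <stdio.h>
#include <stdbool.h>

/* Sketch of the mpls-pop gating: reject anything that is not the
 * first action on a bareudp-attached classifier. */
struct parse_ctx {
	bool filter_dev_is_bareudp;	/* netif_is_bareudp() stand-in */
};

static bool can_offload_mpls_pop(const struct parse_ctx *ctx, int act_index)
{
	if (act_index) {
		fprintf(stderr, "mpls pop supported only as first action\n");
		return false;
	}
	if (!ctx->filter_dev_is_bareudp) {
		fprintf(stderr, "mpls pop supported only on bareudp devices\n");
		return false;
	}
	return true;
}

int main(void)
{
	struct parse_ctx ctx = { .filter_dev_is_bareudp = true };

	printf("pop at index 0: %s\n", can_offload_mpls_pop(&ctx, 0) ? "ok" : "rejected");
	printf("pop at index 2: %s\n", can_offload_mpls_pop(&ctx, 2) ? "ok" : "rejected");
	return 0;
}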
+ +#include <linux/if_vlan.h> +#include "act.h" +#include "pedit.h" +#include "en/tc_priv.h" +#include "en/mod_hdr.h" + +static int pedit_header_offsets[] = { + [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), + [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), + [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), + [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), + [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), +}; + +#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) + +static int +set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) +{ + u32 *curr_pmask, *curr_pval; + + curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); + curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); + + if (*curr_pmask & mask) { /* disallow acting twice on the same location */ + NL_SET_ERR_MSG_MOD(extack, + "curr_pmask and new mask same. Acting twice on same location"); + goto out_err; + } + + *curr_pmask |= mask; + *curr_pval |= (val & mask); + + return 0; + +out_err: + return -EOPNOTSUPP; +} + +static int +parse_pedit_to_modify_hdr(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) +{ + u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1; + u8 htype = act->mangle.htype; + int err = -EOPNOTSUPP; + u32 mask, val, offset; + + if (htype == FLOW_ACT_MANGLE_UNSPEC) { + NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded"); + goto out_err; + } + + if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) { + NL_SET_ERR_MSG_MOD(extack, "The pedit offload action is not supported"); + goto out_err; + } + + mask = act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; + + err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack); + if (err) + goto out_err; + + hdrs[cmd].pedits++; + + return 0; +out_err: + return err; +} + +static int +parse_pedit_to_reformat(const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) +{ + u32 mask, val, offset; + u32 *p; + + if (act->id != FLOW_ACTION_MANGLE) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action id"); + return -EOPNOTSUPP; + } + + if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) { + NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported"); + return -EOPNOTSUPP; + } + + mask = ~act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; + p = (u32 *)&parse_attr->eth; + *(p + (offset >> 2)) |= (val & mask); + + return 0; +} + +int +mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) + return parse_pedit_to_reformat(act, parse_attr, extack); + + return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack); +} + +static bool +tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct 
mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5e_tc_flow *flow = parse_state->flow; + enum mlx5_flow_namespace_type ns_type; + int err; + + ns_type = mlx5e_get_flow_namespace(flow); + + err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type, + attr->parse_attr, parse_state->hdrs, + flow, parse_state->extack); + if (err) + return err; + + if (flow_flag_test(flow, L3_TO_L2_DECAP)) + goto out; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) + esw_attr->split_count = esw_attr->out_count; + +out: + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_pedit = { + .can_offload = tc_act_can_offload_pedit, + .parse_action = tc_act_parse_pedit, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h new file mode 100644 index 000000000000..da8ab03af58f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_EN_TC_ACT_PEDIT_H__ +#define __MLX5_EN_TC_ACT_PEDIT_H__ + +#include "en_tc.h" + +struct pedit_headers { + struct ethhdr eth; + struct vlan_hdr vlan; + struct iphdr ip4; + struct ipv6hdr ip6; + struct tcphdr tcp; + struct udphdr udp; +}; + +struct pedit_headers_action { + struct pedit_headers vals; + struct pedit_headers masks; + u32 pedits; +}; + +int +mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); + +#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c new file mode 100644 index 000000000000..0819110193dc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (act->ptype != PACKET_HOST) { + NL_SET_ERR_MSG_MOD(extack, "skbedit ptype is only supported with type host"); + return -EOPNOTSUPP; + } + + parse_state->ptype_host = true; + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_ptype = { + .can_offload = tc_act_can_offload_ptype, + .parse_action = tc_act_parse_ptype, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c new file mode 100644 index 000000000000..1c32e24e528d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
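One detail worth calling out in the pedit code above: parse_pedit_to_modify_hdr() hands set_pedit_val() the inverted mask, ~act->mangle.mask, because a TC mangle mask marks the bits to keep, while the accumulated pmask tracks the bits already rewritten. The overlap rule itself is compact enough to demonstrate standalone; what follows is a plain C sketch with an invented EOPNOTSUPP constant, not driver code.

#include <stdio.h>

#define SKETCH_EOPNOTSUPP 95	/* stand-in for the errno value */

static unsigned int curr_pmask, curr_pval;

/* Sketch of set_pedit_val(): accumulate rewritten bits and refuse a
 * second mangle that touches any bit already claimed. */
static int set_val(unsigned int mask, unsigned int val)
{
	if (curr_pmask & mask)		/* acting twice on the same bits */
		return -SKETCH_EOPNOTSUPP;
	curr_pmask |= mask;
	curr_pval |= (val & mask);
	return 0;
}

int main(void)
{
	printf("low 16 bits:  %d\n", set_val(0x0000ffff, 0x1234));
	printf("next byte:    %d\n", set_val(0x00ff0000, 0x56 << 16));
	printf("overlap:      %d\n", set_val(0x000000ff, 0x99));
	return 0;
}

The first two writes land on disjoint bits and succeed; the third overlaps the first and returns the error, matching the "acting twice on same location" message above.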
+ +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct net_device *out_dev = act->dev; + struct mlx5_esw_flow_attr *esw_attr; + + parse_attr = flow->attr->parse_attr; + esw_attr = flow->attr->esw_attr; + + if (!out_dev) + return false; + + if (!netif_is_ovs_master(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to ingress is supported only for OVS internal ports"); + return false; + } + + if (netif_is_ovs_master(parse_attr->filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to ingress is not supported from internal port"); + return false; + } + + if (!parse_state->ptype_host) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to int port ingress requires ptype=host action"); + return false; + } + + if (esw_attr->out_count) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to int port ingress is supported only as single destination"); + return false; + } + + return true; +} + +static int +tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *out_dev = act->dev; + int err; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, + MLX5E_TC_INT_PORT_INGRESS, + &attr->action, esw_attr->out_count); + if (err) + return err; + + esw_attr->out_count++; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress = { + .can_offload = tc_act_can_offload_redirect_ingress, + .parse_action = tc_act_parse_redirect_ingress, +}; + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c new file mode 100644 index 000000000000..6699bdf5cf01 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
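tc_act_can_offload_redirect_ingress() above expresses a cross-action dependency through the shared parse state: a preceding skbedit ptype host action must have set parse_state->ptype_host, and no other destination may have been parsed yet. A sketch of those two conditions, assuming a trimmed-down state struct with illustrative field names:

#include <stdio.h>
#include <stdbool.h>

/* Sketch: an ingress redirect is valid only after ptype=host was seen
 * and only as the sole destination. */
struct parse_state {
	bool ptype_host;
	int out_count;
};

static bool can_offload_redirect_ingress(const struct parse_state *ps)
{
	if (!ps->ptype_host) {
		fprintf(stderr, "redirect to int port ingress requires ptype=host\n");
		return false;
	}
	if (ps->out_count) {
		fprintf(stderr, "supported only as single destination\n");
		return false;
	}
	return true;
}

int main(void)
{
	struct parse_state ps = { .ptype_host = false, .out_count = 0 };

	printf("without ptype: %s\n", can_offload_redirect_ingress(&ps) ? "ok" : "rejected");
	ps.ptype_host = true;
	printf("with ptype:    %s\n", can_offload_redirect_ingress(&ps) ? "ok" : "rejected");
	return 0;
}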
+ +#include <net/psample.h> +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (flow_flag_test(parse_state->flow, CT)) { + NL_SET_ERR_MSG_MOD(extack, + "Sample action with connection tracking is not supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_sample_attr *sample_attr; + + sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL); + if (!sample_attr) + return -ENOMEM; + + sample_attr->rate = act->sample.rate; + sample_attr->group_num = act->sample.psample_group->group_num; + + if (act->sample.truncate) + sample_attr->trunc_size = act->sample.trunc_size; + + attr->sample_attr = sample_attr; + flow_flag_set(parse_state->flow, SAMPLE); + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_sample = { + .can_offload = tc_act_can_offload_sample, + .parse_action = tc_act_parse_sample, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c new file mode 100644 index 000000000000..046b64c2cec4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (parse_state->num_actions != 1) { + NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only"); + return false; + } + + return true; +} + +static int +tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_trap = { + .can_offload = tc_act_can_offload_trap, + .parse_action = tc_act_parse_trap, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c new file mode 100644 index 000000000000..6f4a2cf46afd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
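tc_act_parse_sample() above copies the psample parameters into a heap-allocated attribute that outlives the parse, filling trunc_size only when truncation was requested. A userspace sketch, with calloc standing in for kzalloc and an invented struct name:

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the sample-attribute copy; a NULL return maps to -ENOMEM
 * in the driver. */
struct sample_attr {
	unsigned int rate;
	unsigned int group_num;
	unsigned int trunc_size;
};

static struct sample_attr *parse_sample(unsigned int rate, unsigned int group,
					int truncate, unsigned int trunc_size)
{
	struct sample_attr *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->rate = rate;
	s->group_num = group;
	if (truncate)
		s->trunc_size = trunc_size;
	return s;
}

int main(void)
{
	struct sample_attr *s = parse_sample(100, 7, 1, 128);

	if (!s)
		return 1;
	printf("rate=%u group=%u trunc=%u\n", s->rate, s->group_num, s->trunc_size);
	free(s);
	return 0;
}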
+ +#include "act.h" +#include "en/tc_tun_encap.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + if (!act->tunnel) { + NL_SET_ERR_MSG_MOD(parse_state->extack, + "Zero tunnel attributes is not supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + parse_state->tun_info = act->tunnel; + parse_state->encap = true; + + return 0; +} + +static bool +tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + parse_state->decap = true; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_tun_encap = { + .can_offload = tc_act_can_offload_tun_encap, + .parse_action = tc_act_parse_tun_encap, +}; + +struct mlx5e_tc_act mlx5e_tc_act_tun_decap = { + .can_offload = tc_act_can_offload_tun_decap, + .parse_action = tc_act_parse_tun_decap, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c new file mode 100644 index 000000000000..70fc0c2d8813 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include <linux/if_vlan.h> +#include "act.h" +#include "vlan.h" +#include "en/tc_priv.h" + +static int +add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack) +{ + const struct flow_action_entry prio_tag_act = { + .vlan.vid = 0, + .vlan.prio = + MLX5_GET(fte_match_set_lyr_2_4, + mlx5e_get_match_headers_value(*action, + &parse_attr->spec), + first_prio) & + MLX5_GET(fte_match_set_lyr_2_4, + mlx5e_get_match_headers_criteria(*action, + &parse_attr->spec), + first_prio), + }; + + return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, + &prio_tag_act, parse_attr, hdrs, action, + extack); +} + +static int +parse_tc_vlan_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, + struct mlx5_esw_flow_attr *attr, + u32 *action, + struct netlink_ext_ack *extack) +{ + u8 vlan_idx = attr->total_vlan; + + if (vlan_idx >= MLX5_FS_VLAN_DEPTH) { + NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported"); + return -EOPNOTSUPP; + } + + switch (act->id) { + case FLOW_ACTION_VLAN_POP: + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + MLX5_FS_VLAN_DEPTH)) { + NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported"); + return -EOPNOTSUPP; + } + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; + } else { + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + } + break; + case FLOW_ACTION_VLAN_PUSH: + attr->vlan_vid[vlan_idx] = act->vlan.vid; + attr->vlan_prio[vlan_idx] = act->vlan.prio; + attr->vlan_proto[vlan_idx] = act->vlan.proto; + if (!attr->vlan_proto[vlan_idx]) + attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); + + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + 
MLX5_FS_VLAN_DEPTH)) { + NL_SET_ERR_MSG_MOD(extack, + "vlan push action is not supported for vlan depth > 1"); + return -EOPNOTSUPP; + } + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; + } else { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && + (act->vlan.proto != htons(ETH_P_8021Q) || + act->vlan.prio)) { + NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported"); + return -EOPNOTSUPP; + } + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + } + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN"); + return -EINVAL; + } + + attr->total_vlan = vlan_idx + 1; + + return 0; +} + +int +mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct net_device **out_dev, + struct netlink_ext_ack *extack) +{ + struct net_device *vlan_dev = *out_dev; + struct flow_action_entry vlan_act = { + .id = FLOW_ACTION_VLAN_PUSH, + .vlan.vid = vlan_dev_vlan_id(vlan_dev), + .vlan.proto = vlan_dev_vlan_proto(vlan_dev), + .vlan.prio = 0, + }; + int err; + + err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack); + if (err) + return err; + + rcu_read_lock(); + *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev)); + rcu_read_unlock(); + if (!*out_dev) + return -ENODEV; + + if (is_vlan_dev(*out_dev)) + err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack); + + return err; +} + +int +mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack) +{ + struct flow_action_entry vlan_act = { + .id = FLOW_ACTION_VLAN_POP, + }; + int nest_level, err = 0; + + nest_level = attr->parse_attr->filter_dev->lower_level - + priv->netdev->lower_level; + while (nest_level--) { + err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, + extack); + if (err) + return err; + } + + return err; +} + +static bool +tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int err; + + if (act->id == FLOW_ACTION_VLAN_PUSH && + (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) { + /* Replace vlan pop+push with vlan modify */ + attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act, + attr->parse_attr, parse_state->hdrs, + &attr->action, parse_state->extack); + } else { + err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action, + parse_state->extack); + } + + if (err) + return err; + + esw_attr->split_count = esw_attr->out_count; + + return 0; +} + +static int +tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct pedit_headers_action *hdrs = parse_state->hdrs; + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int err; + + if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && + attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { + /* For prio tag mode, replace vlan pop with rewrite vlan prio + * tag rewrite. 
+ */ + attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs, + &attr->action, extack); + if (err) + return err; + } + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_vlan = { + .can_offload = tc_act_can_offload_vlan, + .parse_action = tc_act_parse_vlan, + .post_parse = tc_act_post_parse_vlan, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h new file mode 100644 index 000000000000..3d62f13ab61f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_EN_TC_ACT_VLAN_H__ +#define __MLX5_EN_TC_ACT_VLAN_H__ + +#include <net/flow_offload.h> +#include "en/tc_priv.h" + +struct pedit_headers_action; + +int +mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct net_device **out_dev, + struct netlink_ext_ack *extack); + +int +mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack); + +int +mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace, + const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack); + +#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c new file mode 100644 index 000000000000..63e36e7f53e3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
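A noteworthy transformation in tc_act_parse_vlan() above: when a vlan push is parsed while a pop is already pending in attr->action, the pair is folded into a single TCI rewrite via mlx5e_tc_act_vlan_add_rewrite_action() (declared in vlan.h above and defined in vlan_mangle.c just below), which the hardware can execute as one modify-header step. A sketch of just the flag juggling, with illustrative bit values rather than the MLX5_FLOW_CONTEXT_ACTION_* constants:

#include <stdio.h>

#define ACT_VLAN_POP	(1u << 0)
#define ACT_VLAN_PUSH	(1u << 1)
#define ACT_MOD_HDR	(1u << 2)

/* Sketch: fold a pending pop plus an incoming push into one header
 * rewrite instead of two packet-editing steps. */
static unsigned int parse_vlan_push(unsigned int action)
{
	if (action & ACT_VLAN_POP) {
		action &= ~ACT_VLAN_POP;	/* cancel the pending pop */
		action |= ACT_MOD_HDR;		/* rewrite the TCI in place */
	} else {
		action |= ACT_VLAN_PUSH;	/* genuine push */
	}
	return action;
}

int main(void)
{
	printf("pop then push -> %#x\n", parse_vlan_push(ACT_VLAN_POP));
	printf("plain push    -> %#x\n", parse_vlan_push(0));
	return 0;
}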
+ +#include <linux/if_vlan.h> +#include "act.h" +#include "vlan.h" +#include "en/tc_priv.h" + +struct pedit_headers_action; + +int +mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace, + const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack) +{ + u16 mask16 = VLAN_VID_MASK; + u16 val16 = act->vlan.vid & VLAN_VID_MASK; + const struct flow_action_entry pedit_act = { + .id = FLOW_ACTION_MANGLE, + .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH, + .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI), + .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16), + .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16), + }; + u8 match_prio_mask, match_prio_val; + void *headers_c, *headers_v; + int err; + + headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec); + headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec); + + if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) { + NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match"); + return -EOPNOTSUPP; + } + + match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio); + match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio); + if (act->vlan.prio != (match_prio_val & match_prio_mask)) { + NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported"); + return -EOPNOTSUPP; + } + + err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs, + NULL, extack); + *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + return err; +} + +static bool +tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + enum mlx5_flow_namespace_type ns_type; + int err; + + ns_type = mlx5e_get_flow_namespace(parse_state->flow); + err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act, + attr->parse_attr, parse_state->hdrs, + &attr->action, parse_state->extack); + if (err) + return err; + + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) + attr->esw_attr->split_count = attr->esw_attr->out_count; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = { + .can_offload = tc_act_can_offload_vlan_mangle, + .parse_action = tc_act_parse_vlan_mangle, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index b689701ac7d8..f832c26ff2c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -5,11 +5,14 @@ #define __MLX5_EN_TC_PRIV_H__ #include "en_tc.h" +#include "en/tc/act/act.h" #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1) #define MLX5E_TC_MAX_SPLITS 1 +#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains) + enum { MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, @@ -37,6 +40,7 @@ struct mlx5e_tc_flow_parse_attr { struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; struct ethhdr eth; + struct mlx5e_tc_act_parse_state parse_state; }; /* Helper struct for accessing a struct containing list_head array. 
@@ -115,7 +119,11 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr); +bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow); +bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow); bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow); +int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow); +bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv); static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) { @@ -176,4 +184,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow); struct mlx5e_tc_int_port_priv * mlx5e_get_int_port_priv(struct mlx5e_priv *priv); + +void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec); +void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec); + #endif /* __MLX5_EN_TC_PRIV_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index fb5397324aa4..2db9573a3fe6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2; - if (skb->protocol == htons(ETH_P_IPV6)) + if (inner_ip_hdr(skb)->version == 6) eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index a2a9f68579dd..15711814d2d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void) return resp_list; } -static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn) -{ - struct mlx5e_tir_builder *builder; - int err; - - builder = mlx5e_tir_builder_alloc(false); - if (!builder) - return -ENOMEM; - - mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false); - mlx5e_tir_builder_build_direct(builder); - mlx5e_tir_builder_build_tls(builder); - err = mlx5e_tir_init(tir, builder, mdev, false); - - mlx5e_tir_builder_free(builder); - - return err; -} - static void accel_rule_handle_work(struct work_struct *work) { struct mlx5e_ktls_offload_context_rx *priv_rx; @@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, struct mlx5_core_dev *mdev; struct mlx5e_priv *priv; int rxq, err; - u32 rqtn; tls_ctx = tls_get_ctx(sk); priv = netdev_priv(netdev); @@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->sw_stats = &priv->tls->sw_stats; mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); - rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq); - - err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn); + err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir); if (err) goto err_create_tir; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 65571593ec5c..496977e7406e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2598,7 +2598,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, } } -int 
mlx5e_num_channels_changed(struct mlx5e_priv *priv) +static int mlx5e_num_channels_changed(struct mlx5e_priv *priv) { u16 count = priv->channels.params.num_channels; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 87c762066adb..6e0f88ea3701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1086,6 +1086,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = { &MLX5E_STATS_GRP(pme), &MLX5E_STATS_GRP(channels), &MLX5E_STATS_GRP(per_port_buff_congest), +#ifdef CONFIG_MLX5_EN_IPSEC + &MLX5E_STATS_GRP(ipsec_sw), + &MLX5E_STATS_GRP(ipsec_hw), +#endif }; static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e384f6458c06..7e05d7592bce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -544,13 +544,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 klm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries; + u16 entries, pi, header_offset, err, wqe_bbs, new_entries; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; struct page *page = shampo->last_page; u64 addr = shampo->last_addr; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; - int headroom; + int headroom, i; headroom = rq->buff.headroom; new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); @@ -602,9 +602,7 @@ update_klm: err_unmap: while (--i >= 0) { - if (--index < 0) - index = shampo->hd_per_wq - 1; - dma_info = &shampo->info[index]; + dma_info = &shampo->info[--index]; if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); mlx5e_page_release(rq, dma_info, true); @@ -621,7 +619,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) struct mlx5e_icosq *sq = rq->icosq; int i, err, max_klm_entries, len; - max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); + max_klm_entries = MLX5E_MAX_KLM_PER_WQE; klm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index cb66c08783c2..eec919f1b476 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -39,10 +39,6 @@ #include <linux/rhashtable.h> #include <linux/refcount.h> #include <linux/completion.h> -#include <linux/if_macvlan.h> -#include <net/tc_act/tc_pedit.h> -#include <net/tc_act/tc_csum.h> -#include <net/psample.h> #include <net/arp.h> #include <net/ipv6_stubs.h> #include <net/bareudp.h> @@ -62,6 +58,7 @@ #include "en/mod_hdr.h" #include "en/tc_tun_encap.h" #include "en/tc/sample.h" +#include "en/tc/act/act.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "lib/fs_chains.h" @@ -70,8 +67,6 @@ #include "lag/lag.h" #include "lag/mp.h" -#define nic_chains(priv) ((priv)->fs.tc.chains) - #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18) @@ -399,7 +394,7 @@ bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) return flow_flag_test(flow, ESWITCH); } -static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow) +bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow) { return flow_flag_test(flow, FT); } @@ -409,7 +404,7 
@@ bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) return flow_flag_test(flow, OFFLOADED); } -static int get_flow_name_space(struct mlx5e_tc_flow *flow) +int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow) { return mlx5e_is_eswitch_flow(flow) ? MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL; @@ -420,7 +415,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ? + return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr : &priv->fs.tc.mod_hdr; } @@ -433,7 +428,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, struct mlx5e_mod_hdr_handle *mh; mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow), - get_flow_name_space(flow), + mlx5e_get_flow_namespace(flow), &parse_attr->mod_hdr_acts); if (IS_ERR(mh)) return PTR_ERR(mh); @@ -937,7 +932,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { struct mlx5_flow_context *flow_context = &spec->flow_context; - struct mlx5_fs_chains *nic_chains = nic_chains(priv); + struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr; struct mlx5e_tc_table *tc = &priv->fs.tc; struct mlx5_flow_destination dest[2] = {}; @@ -1091,7 +1086,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr) { - struct mlx5_fs_chains *nic_chains = nic_chains(priv); + struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); mlx5_del_flow_rules(rule); @@ -1123,7 +1118,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, mutex_lock(&priv->fs.tc.t_lock); if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && !IS_ERR_OR_NULL(tc->t)) { - mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL); + mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL); priv->fs.tc.t = NULL; } mutex_unlock(&priv->fs.tc.t_lock); @@ -1304,8 +1299,6 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow) mutex_unlock(&uplink_priv->unready_flows_lock); } -static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv); - bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev) { struct mlx5_core_dev *out_mdev, *route_mdev; @@ -1320,7 +1313,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ route_mdev->coredev_type != MLX5_COREDEV_VF) return false; - return same_hw_devs(out_priv, route_priv); + return mlx5e_same_hw_devs(out_priv, route_priv); } int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport) @@ -1367,7 +1360,7 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, struct mlx5_modify_hdr *mod_hdr; mod_hdr = mlx5_modify_header_alloc(priv->mdev, - get_flow_name_space(flow), + mlx5e_get_flow_namespace(flow), mod_hdr_acts->num_actions, mod_hdr_acts->actions); if (IS_ERR(mod_hdr)) @@ -1441,7 +1434,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG, metadata); if (err) - return err; + goto err_out; } } @@ -1457,13 +1450,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (attr->chain) { NL_SET_ERR_MSG_MOD(extack, "Internal port rule is only supported on chain 0"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } if (attr->dest_chain) { NL_SET_ERR_MSG_MOD(extack, "Internal port rule offload doesn't support goto 
action"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), @@ -1471,8 +1466,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, flow_flag_test(flow, EGRESS) ? MLX5E_TC_INT_PORT_EGRESS : MLX5E_TC_INT_PORT_INGRESS); - if (IS_ERR(int_port)) - return PTR_ERR(int_port); + if (IS_ERR(int_port)) { + err = PTR_ERR(int_port); + goto err_out; + } esw_attr->int_port = int_port; } @@ -2049,16 +2046,14 @@ static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec) outer_headers); } -static void *get_match_headers_value(u32 flags, - struct mlx5_flow_spec *spec) +void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec) { return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? get_match_inner_headers_value(spec) : get_match_outer_headers_value(spec); } -static void *get_match_headers_criteria(u32 flags, - struct mlx5_flow_spec *spec) +void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec) { return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? get_match_inner_headers_criteria(spec) : @@ -2603,55 +2598,6 @@ static int parse_cls_flower(struct mlx5e_priv *priv, return err; } -struct pedit_headers { - struct ethhdr eth; - struct vlan_hdr vlan; - struct iphdr ip4; - struct ipv6hdr ip6; - struct tcphdr tcp; - struct udphdr udp; -}; - -struct pedit_headers_action { - struct pedit_headers vals; - struct pedit_headers masks; - u32 pedits; -}; - -static int pedit_header_offsets[] = { - [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), - [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), - [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), - [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), - [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), -}; - -#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) - -static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, - struct pedit_headers_action *hdrs, - struct netlink_ext_ack *extack) -{ - u32 *curr_pmask, *curr_pval; - - curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); - curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); - - if (*curr_pmask & mask) { /* disallow acting twice on the same location */ - NL_SET_ERR_MSG_MOD(extack, - "curr_pmask and new mask same. Acting twice on same location"); - goto out_err; - } - - *curr_pmask |= mask; - *curr_pval |= (val & mask); - - return 0; - -out_err: - return -EOPNOTSUPP; -} - struct mlx5_fields { u8 field; u8 field_bsize; @@ -2772,8 +2718,8 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, u8 cmd; mod_acts = &parse_attr->mod_hdr_acts; - headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec); - headers_v = get_match_headers_value(*action_flags, &parse_attr->spec); + headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec); + headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec); set_masks = &hdrs[0].masks; add_masks = &hdrs[1].masks; @@ -2888,88 +2834,6 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, static const struct pedit_headers zero_masks = {}; -static int -parse_pedit_to_modify_hdr(struct mlx5e_priv *priv, - const struct flow_action_entry *act, int namespace, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - struct netlink_ext_ack *extack) -{ - u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 
0 : 1; - int err = -EOPNOTSUPP; - u32 mask, val, offset; - u8 htype; - - htype = act->mangle.htype; - err = -EOPNOTSUPP; /* can't be all optimistic */ - - if (htype == FLOW_ACT_MANGLE_UNSPEC) { - NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded"); - goto out_err; - } - - if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) { - NL_SET_ERR_MSG_MOD(extack, - "The pedit offload action is not supported"); - goto out_err; - } - - mask = act->mangle.mask; - val = act->mangle.val; - offset = act->mangle.offset; - - err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack); - if (err) - goto out_err; - - hdrs[cmd].pedits++; - - return 0; -out_err: - return err; -} - -static int -parse_pedit_to_reformat(const struct flow_action_entry *act, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct netlink_ext_ack *extack) -{ - u32 mask, val, offset; - u32 *p; - - if (act->id != FLOW_ACTION_MANGLE) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported action id"); - return -EOPNOTSUPP; - } - - if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) { - NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported"); - return -EOPNOTSUPP; - } - - mask = ~act->mangle.mask; - val = act->mangle.val; - offset = act->mangle.offset; - p = (u32 *)&parse_attr->eth; - *(p + (offset >> 2)) |= (val & mask); - - return 0; -} - -static int parse_tc_pedit_action(struct mlx5e_priv *priv, - const struct flow_action_entry *act, int namespace, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) -{ - if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) - return parse_pedit_to_reformat(act, parse_attr, extack); - - return parse_pedit_to_modify_hdr(priv, act, namespace, - parse_attr, hdrs, extack); -} - static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr, struct pedit_headers_action *hdrs, @@ -3005,35 +2869,6 @@ out_dealloc_parsed_actions: return err; } -static bool csum_offload_supported(struct mlx5e_priv *priv, - u32 action, - u32 update_flags, - struct netlink_ext_ack *extack) -{ - u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | - TCA_CSUM_UPDATE_FLAG_UDP; - - /* The HW recalcs checksums only if re-writing headers */ - if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { - NL_SET_ERR_MSG_MOD(extack, - "TC csum action is only offloaded with pedit"); - netdev_warn(priv->netdev, - "TC csum action is only offloaded with pedit\n"); - return false; - } - - if (update_flags & ~prot_flags) { - NL_SET_ERR_MSG_MOD(extack, - "can't offload TC csum action for some header/s"); - netdev_warn(priv->netdev, - "can't offload TC csum action for some header/s - flags %#x\n", - update_flags); - return false; - } - - return true; -} - struct ip_ttl_word { __u8 ttl; __u8 protocol; @@ -3156,8 +2991,8 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, u8 ip_proto; int i; - headers_c = get_match_headers_criteria(actions, spec); - headers_v = get_match_headers_value(actions, spec); + headers_c = mlx5e_get_match_headers_criteria(actions, spec); + headers_v = mlx5e_get_match_headers_value(actions, spec); ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); /* for non-IP we only re-write MACs, so we're okay */ @@ -3264,7 +3099,7 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv return priv->mdev == peer_priv->mdev; } -static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv 
*peer_priv) +bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *fmdev, *pmdev; u64 fsystem_guid, psystem_guid; @@ -3278,126 +3113,45 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) return (fsystem_guid == psystem_guid); } -static bool same_vf_reps(struct mlx5e_priv *priv, - struct net_device *out_dev) -{ - return mlx5e_eswitch_vf_rep(priv->netdev) && - priv->netdev == out_dev; -} - -static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, - const struct flow_action_entry *act, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - u32 *action, struct netlink_ext_ack *extack) -{ - u16 mask16 = VLAN_VID_MASK; - u16 val16 = act->vlan.vid & VLAN_VID_MASK; - const struct flow_action_entry pedit_act = { - .id = FLOW_ACTION_MANGLE, - .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH, - .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI), - .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16), - .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16), - }; - u8 match_prio_mask, match_prio_val; - void *headers_c, *headers_v; - int err; - - headers_c = get_match_headers_criteria(*action, &parse_attr->spec); - headers_v = get_match_headers_value(*action, &parse_attr->spec); - - if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) && - MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) { - NL_SET_ERR_MSG_MOD(extack, - "VLAN rewrite action must have VLAN protocol match"); - return -EOPNOTSUPP; - } - - match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio); - match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio); - if (act->vlan.prio != (match_prio_val & match_prio_mask)) { - NL_SET_ERR_MSG_MOD(extack, - "Changing VLAN prio is not supported"); - return -EOPNOTSUPP; - } - - err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack); - *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - - return err; -} - static int -add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - u32 *action, struct netlink_ext_ack *extack) -{ - const struct flow_action_entry prio_tag_act = { - .vlan.vid = 0, - .vlan.prio = - MLX5_GET(fte_match_set_lyr_2_4, - get_match_headers_value(*action, - &parse_attr->spec), - first_prio) & - MLX5_GET(fte_match_set_lyr_2_4, - get_match_headers_criteria(*action, - &parse_attr->spec), - first_prio), - }; - - return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, - &prio_tag_act, parse_attr, hdrs, action, - extack); -} - -static int validate_goto_chain(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - const struct flow_action_entry *act, - u32 actions, - struct netlink_ext_ack *extack) +parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, + struct flow_action *flow_action) { - bool is_esw = mlx5e_is_eswitch_flow(flow); + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; struct mlx5_flow_attr *attr = flow->attr; - bool ft_flow = mlx5e_is_ft_flow(flow); - u32 dest_chain = act->chain_index; - struct mlx5_fs_chains *chains; - struct mlx5_eswitch *esw; - u32 reformat_and_fwd; - u32 max_chain; + enum mlx5_flow_namespace_type ns_type; + struct mlx5e_priv *priv = flow->priv; + const struct flow_action_entry *act; + struct mlx5e_tc_act *tc_act; + int err, i; - esw = priv->mdev->priv.eswitch; - chains = is_esw ? 
esw_chains(esw) : nic_chains(priv); - max_chain = mlx5_chains_get_chain_range(chains); - reformat_and_fwd = is_esw ? - MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) : - MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table); - - if (ft_flow) { - NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); - return -EOPNOTSUPP; - } + ns_type = mlx5e_get_flow_namespace(flow); - if (!mlx5_chains_backwards_supported(chains) && - dest_chain <= attr->chain) { - NL_SET_ERR_MSG_MOD(extack, - "Goto lower numbered chain isn't supported"); - return -EOPNOTSUPP; - } + flow_action_for_each(i, act, flow_action) { + tc_act = mlx5e_tc_act_get(act->id, ns_type); + if (!tc_act) { + NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action"); + return -EOPNOTSUPP; + } - if (dest_chain > max_chain) { - NL_SET_ERR_MSG_MOD(extack, - "Requested destination chain is out of supported range"); - return -EOPNOTSUPP; + if (!tc_act->can_offload(parse_state, act, i)) + return -EOPNOTSUPP; + + err = tc_act->parse_action(parse_state, act, priv, attr); + if (err) + return err; } - if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | - MLX5_FLOW_CONTEXT_ACTION_DECAP) && - !reformat_and_fwd) { - NL_SET_ERR_MSG_MOD(extack, - "Goto chain is not allowed if action has reformat or decap"); - return -EOPNOTSUPP; + flow_action_for_each(i, act, flow_action) { + tc_act = mlx5e_tc_act_get(act->id, ns_type); + if (!tc_act || !tc_act->post_parse || + !tc_act->can_offload(parse_state, act, i)) + continue; + + err = tc_act->post_parse(parse_state, priv, attr); + if (err) + return err; } return 0; @@ -3418,7 +3172,7 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) return 0; - ns_type = get_flow_name_space(flow); + ns_type = mlx5e_get_flow_namespace(flow); err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs, &attr->action, extack); @@ -3443,19 +3197,9 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, } static int -parse_tc_nic_actions(struct mlx5e_priv *priv, - struct flow_action *flow_action, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) +flow_action_supported(struct flow_action *flow_action, + struct netlink_ext_ack *extack) { - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5_flow_attr *attr = flow->attr; - struct pedit_headers_action hdrs[2] = {}; - const struct flow_action_entry *act; - struct mlx5_nic_flow_attr *nic_attr; - u32 action = 0; - int err, i; - if (!flow_action_has_entries(flow_action)) { NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); return -EINVAL; @@ -3467,108 +3211,35 @@ parse_tc_nic_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - nic_attr = attr->nic_attr; - nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - parse_attr = attr->parse_attr; - - flow_action_for_each(i, act, flow_action) { - switch (act->id) { - case FLOW_ACTION_ACCEPT: - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - break; - case FLOW_ACTION_DROP: - action |= MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - break; - case FLOW_ACTION_MANGLE: - case FLOW_ACTION_ADD: - err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr, hdrs, NULL, extack); - if (err) - return err; - - action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - break; - case FLOW_ACTION_VLAN_MANGLE: - err = add_vlan_rewrite_action(priv, - MLX5_FLOW_NAMESPACE_KERNEL, - act, parse_attr, hdrs, - &action, extack); - if (err) - return err; - - break; - case 
FLOW_ACTION_CSUM: - if (csum_offload_supported(priv, action, - act->csum_flags, - extack)) - break; - - return -EOPNOTSUPP; - case FLOW_ACTION_REDIRECT: { - struct net_device *peer_dev = act->dev; - - if (priv->netdev->netdev_ops == peer_dev->netdev_ops && - same_hw_devs(priv, netdev_priv(peer_dev))) { - parse_attr->mirred_ifindex[0] = peer_dev->ifindex; - flow_flag_set(flow, HAIRPIN); - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - } else { - NL_SET_ERR_MSG_MOD(extack, - "device is not on same HW, can't offload"); - netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", - peer_dev->name); - return -EOPNOTSUPP; - } - } - break; - case FLOW_ACTION_MARK: { - u32 mark = act->mark; - - if (mark & ~MLX5E_TC_FLOW_ID_MASK) { - NL_SET_ERR_MSG_MOD(extack, - "Bad flow mark - only 16 bit is supported"); - return -EOPNOTSUPP; - } - - nic_attr->flow_tag = mark; - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - } - break; - case FLOW_ACTION_GOTO: - err = validate_goto_chain(priv, flow, act, action, - extack); - if (err) - return err; + return 0; +} - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - attr->dest_chain = act->chain_index; - break; - case FLOW_ACTION_CT: - err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, - &parse_attr->mod_hdr_acts, - act, extack); - if (err) - return err; +static int +parse_tc_nic_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + struct mlx5e_tc_act_parse_state *parse_state; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct pedit_headers_action *hdrs; + int err; - flow_flag_set(flow, CT); - break; - default: - NL_SET_ERR_MSG_MOD(extack, - "The offload action is not supported in NIC action"); - return -EOPNOTSUPP; - } - } + err = flow_action_supported(flow_action, extack); + if (err) + return err; - attr->action = action; + attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + parse_attr = attr->parse_attr; + parse_state = &parse_attr->parse_state; + mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); + parse_state->ct_priv = get_ct_priv(priv); + hdrs = parse_state->hdrs; - if (attr->dest_chain && parse_attr->mirred_ifindex[0]) { - NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); - return -EOPNOTSUPP; - } + err = parse_tc_actions(parse_state, flow_action); + if (err) + return err; err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); if (err) @@ -3590,147 +3261,7 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && mlx5e_eswitch_vf_rep(priv->netdev) && mlx5e_eswitch_vf_rep(peer_netdev) && - same_hw_devs(priv, peer_priv)); -} - -static int parse_tc_vlan_action(struct mlx5e_priv *priv, - const struct flow_action_entry *act, - struct mlx5_esw_flow_attr *attr, - u32 *action, - struct netlink_ext_ack *extack) -{ - u8 vlan_idx = attr->total_vlan; - - if (vlan_idx >= MLX5_FS_VLAN_DEPTH) { - NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported"); - return -EOPNOTSUPP; - } - - switch (act->id) { - case FLOW_ACTION_VLAN_POP: - if (vlan_idx) { - if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, - MLX5_FS_VLAN_DEPTH)) { - NL_SET_ERR_MSG_MOD(extack, - "vlan pop action is not supported"); - return -EOPNOTSUPP; - } - - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; - } else { - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - } - 
break; - case FLOW_ACTION_VLAN_PUSH: - attr->vlan_vid[vlan_idx] = act->vlan.vid; - attr->vlan_prio[vlan_idx] = act->vlan.prio; - attr->vlan_proto[vlan_idx] = act->vlan.proto; - if (!attr->vlan_proto[vlan_idx]) - attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); - - if (vlan_idx) { - if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, - MLX5_FS_VLAN_DEPTH)) { - NL_SET_ERR_MSG_MOD(extack, - "vlan push action is not supported for vlan depth > 1"); - return -EOPNOTSUPP; - } - - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; - } else { - if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && - (act->vlan.proto != htons(ETH_P_8021Q) || - act->vlan.prio)) { - NL_SET_ERR_MSG_MOD(extack, - "vlan push action is not supported"); - return -EOPNOTSUPP; - } - - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - } - break; - default: - NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN"); - return -EINVAL; - } - - attr->total_vlan = vlan_idx + 1; - - return 0; -} - -static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev, - struct net_device *out_dev) -{ - struct net_device *fdb_out_dev = out_dev; - struct net_device *uplink_upper; - - rcu_read_lock(); - uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); - if (uplink_upper && netif_is_lag_master(uplink_upper) && - uplink_upper == out_dev) { - fdb_out_dev = uplink_dev; - } else if (netif_is_lag_master(out_dev)) { - fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev)); - if (fdb_out_dev && - (!mlx5e_eswitch_rep(fdb_out_dev) || - !netdev_port_same_parent_id(fdb_out_dev, uplink_dev))) - fdb_out_dev = NULL; - } - rcu_read_unlock(); - return fdb_out_dev; -} - -static int add_vlan_push_action(struct mlx5e_priv *priv, - struct mlx5_flow_attr *attr, - struct net_device **out_dev, - u32 *action, - struct netlink_ext_ack *extack) -{ - struct net_device *vlan_dev = *out_dev; - struct flow_action_entry vlan_act = { - .id = FLOW_ACTION_VLAN_PUSH, - .vlan.vid = vlan_dev_vlan_id(vlan_dev), - .vlan.proto = vlan_dev_vlan_proto(vlan_dev), - .vlan.prio = 0, - }; - int err; - - err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack); - if (err) - return err; - - rcu_read_lock(); - *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev)); - rcu_read_unlock(); - if (!*out_dev) - return -ENODEV; - - if (is_vlan_dev(*out_dev)) - err = add_vlan_push_action(priv, attr, out_dev, action, extack); - - return err; -} - -static int add_vlan_pop_action(struct mlx5e_priv *priv, - struct mlx5_flow_attr *attr, - u32 *action, - struct netlink_ext_ack *extack) -{ - struct flow_action_entry vlan_act = { - .id = FLOW_ACTION_VLAN_POP, - }; - int nest_level, err = 0; - - nest_level = attr->parse_attr->filter_dev->lower_level - - priv->netdev->lower_level; - while (nest_level--) { - err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack); - if (err) - return err; - } - - return err; + mlx5e_same_hw_devs(priv, peer_priv)); } static bool same_hw_reps(struct mlx5e_priv *priv, @@ -3742,7 +3273,7 @@ static bool same_hw_reps(struct mlx5e_priv *priv, return mlx5e_eswitch_rep(priv->netdev) && mlx5e_eswitch_rep(peer_netdev) && - same_hw_devs(priv, peer_priv); + mlx5e_same_hw_devs(priv, peer_priv); } static bool is_lag_dev(struct mlx5e_priv *priv, @@ -3766,66 +3297,6 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, same_port_devs(priv, netdev_priv(out_dev)); } -static bool is_duplicated_output_device(struct net_device *dev, - struct net_device *out_dev, - int *ifindexes, int 
if_count, - struct netlink_ext_ack *extack) -{ - int i; - - for (i = 0; i < if_count; i++) { - if (ifindexes[i] == out_dev->ifindex) { - NL_SET_ERR_MSG_MOD(extack, - "can't duplicate output to same device"); - netdev_err(dev, "can't duplicate output to same device: %s\n", - out_dev->name); - return true; - } - } - - return false; -} - -static int verify_uplink_forwarding(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct net_device *out_dev, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *rep_priv; - - /* Forwarding non encapsulated traffic between - * uplink ports is allowed only if - * termination_table_raw_traffic cap is set. - * - * Input vport was stored attr->in_rep. - * In LAG case, *priv* is the private data of - * uplink which may be not the input vport. - */ - rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep); - - if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) && - mlx5e_eswitch_uplink_rep(out_dev))) - return 0; - - if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, - termination_table_raw_traffic)) { - NL_SET_ERR_MSG_MOD(extack, - "devices are both uplink, can't offload forwarding"); - pr_err("devices %s %s are both uplink, can't offload forwarding\n", - priv->netdev->name, out_dev->name); - return -EOPNOTSUPP; - } else if (out_dev != rep_priv->netdev) { - NL_SET_ERR_MSG_MOD(extack, - "devices are not the same uplink, can't offload forwarding"); - pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n", - priv->netdev->name, out_dev->name); - return -EOPNOTSUPP; - } - return 0; -} - int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, int ifindex, @@ -3865,386 +3336,33 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, return 0; } -static int parse_tc_fdb_actions(struct mlx5e_priv *priv, - struct flow_action *flow_action, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) +static int +parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { - struct pedit_headers_action hdrs[2] = {}; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_act_parse_state *parse_state; struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5e_sample_attr sample_attr = {}; - const struct ip_tunnel_info *info = NULL; struct mlx5_flow_attr *attr = flow->attr; - int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; - bool ft_flow = mlx5e_is_ft_flow(flow); - const struct flow_action_entry *act; struct mlx5_esw_flow_attr *esw_attr; - bool encap = false, decap = false; - u32 action = attr->action; - int err, i, if_count = 0; - bool ptype_host = false; - bool mpls_push = false; - - if (!flow_action_has_entries(flow_action)) { - NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); - return -EINVAL; - } + struct pedit_headers_action *hdrs; + int err; - if (!flow_action_hw_stats_check(flow_action, extack, - FLOW_ACTION_HW_STATS_DELAYED_BIT)) { - NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); - return -EOPNOTSUPP; - } + err = flow_action_supported(flow_action, extack); + if (err) + return err; esw_attr = attr->esw_attr; parse_attr = attr->parse_attr; + parse_state = &parse_attr->parse_state; + mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); + parse_state->ct_priv = get_ct_priv(priv); 
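[Editor's note] The hunks above replace the long per-action switch statements in parse_tc_nic_actions() and parse_tc_fdb_actions() with a table-driven dispatch: each TC action id resolves, per flow namespace, to a struct mlx5e_tc_act whose can_offload/parse_action callbacks run in a first pass and whose optional post_parse callbacks run in a second pass. The following is a minimal compilable sketch of that pattern, not part of the patch; every type and name here is a simplified stand-in for the mlx5e_tc_act infrastructure, not the driver's actual definitions.

/* Editor's sketch (not part of the patch): table-driven TC action dispatch.
 * All names are simplified stand-ins for the mlx5e_tc_act infrastructure.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum act_id { ACT_ACCEPT, ACT_DROP, ACT_MAX };

struct parse_state { unsigned int action_flags; };
struct flow_act { enum act_id id; };

struct tc_act_ops {
	bool (*can_offload)(struct parse_state *ps, const struct flow_act *act);
	int  (*parse_action)(struct parse_state *ps, const struct flow_act *act);
	int  (*post_parse)(struct parse_state *ps);  /* optional, may be NULL */
};

static bool always_ok(struct parse_state *ps, const struct flow_act *act)
{
	return true;
}

static int parse_accept(struct parse_state *ps, const struct flow_act *act)
{
	ps->action_flags |= 0x1;  /* fwd + count in the real driver */
	return 0;
}

static int parse_drop(struct parse_state *ps, const struct flow_act *act)
{
	ps->action_flags |= 0x2;
	return 0;
}

/* The real driver keeps one ops table per namespace (NIC vs. FDB). */
static const struct tc_act_ops acts_table[ACT_MAX] = {
	[ACT_ACCEPT] = { always_ok, parse_accept, NULL },
	[ACT_DROP]   = { always_ok, parse_drop,   NULL },
};

static const struct tc_act_ops *tc_act_get(enum act_id id)
{
	if (id >= ACT_MAX || !acts_table[id].parse_action)
		return NULL;  /* action not implemented for this namespace */
	return &acts_table[id];
}

static int parse_actions(struct parse_state *ps, const struct flow_act *a, int n)
{
	const struct tc_act_ops *ops;
	int i, err;

	for (i = 0; i < n; i++) {  /* first pass: validate and parse */
		ops = tc_act_get(a[i].id);
		if (!ops || !ops->can_offload(ps, &a[i]))
			return -EOPNOTSUPP;
		err = ops->parse_action(ps, &a[i]);
		if (err)
			return err;
	}
	for (i = 0; i < n; i++) {  /* second pass: whole-list fixups */
		ops = tc_act_get(a[i].id);
		if (!ops || !ops->post_parse)
			continue;
		err = ops->post_parse(ps);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	struct flow_act acts[] = { { ACT_ACCEPT }, { ACT_DROP } };
	struct parse_state ps = { 0 };

	printf("err = %d, flags = %#x\n",
	       parse_actions(&ps, acts, 2), ps.action_flags);
	return 0;
}

Keeping the per-action logic behind ops tables lets the NIC and FDB parsers share one loop while still rejecting actions that only one namespace supports.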
+ hdrs = parse_state->hdrs; - flow_action_for_each(i, act, flow_action) { - switch (act->id) { - case FLOW_ACTION_ACCEPT: - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT; - break; - case FLOW_ACTION_PTYPE: - if (act->ptype != PACKET_HOST) { - NL_SET_ERR_MSG_MOD(extack, - "skbedit ptype is only supported with type host"); - return -EOPNOTSUPP; - } - - ptype_host = true; - break; - case FLOW_ACTION_DROP: - action |= MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - break; - case FLOW_ACTION_TRAP: - if (!flow_offload_has_one_action(flow_action)) { - NL_SET_ERR_MSG_MOD(extack, - "action trap is supported as a sole action only"); - return -EOPNOTSUPP; - } - action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT); - attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; - break; - case FLOW_ACTION_MPLS_PUSH: - if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, - reformat_l2_to_l3_tunnel) || - act->mpls_push.proto != htons(ETH_P_MPLS_UC)) { - NL_SET_ERR_MSG_MOD(extack, - "mpls push is supported only for mpls_uc protocol"); - return -EOPNOTSUPP; - } - mpls_push = true; - break; - case FLOW_ACTION_MPLS_POP: - /* we only support mpls pop if it is the first action - * and the filter net device is bareudp. Subsequent - * actions can be pedit and the last can be mirred - * egress redirect. - */ - if (i) { - NL_SET_ERR_MSG_MOD(extack, - "mpls pop supported only as first action"); - return -EOPNOTSUPP; - } - if (!netif_is_bareudp(parse_attr->filter_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "mpls pop supported only on bareudp devices"); - return -EOPNOTSUPP; - } - - parse_attr->eth.h_proto = act->mpls_pop.proto; - action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - flow_flag_set(flow, L3_TO_L2_DECAP); - break; - case FLOW_ACTION_MANGLE: - case FLOW_ACTION_ADD: - err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB, - parse_attr, hdrs, flow, extack); - if (err) - return err; - - if (!flow_flag_test(flow, L3_TO_L2_DECAP)) { - action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - esw_attr->split_count = esw_attr->out_count; - } - break; - case FLOW_ACTION_CSUM: - if (csum_offload_supported(priv, action, - act->csum_flags, extack)) - break; - - return -EOPNOTSUPP; - case FLOW_ACTION_REDIRECT_INGRESS: { - struct net_device *out_dev; - - out_dev = act->dev; - if (!out_dev) - return -EOPNOTSUPP; - - if (!netif_is_ovs_master(out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to ingress is supported only for OVS internal ports"); - return -EOPNOTSUPP; - } - - if (netif_is_ovs_master(parse_attr->filter_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to ingress is not supported from internal port"); - return -EOPNOTSUPP; - } - - if (!ptype_host) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to int port ingress requires ptype=host action"); - return -EOPNOTSUPP; - } - - if (esw_attr->out_count) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to int port ingress is supported only as single destination"); - return -EOPNOTSUPP; - } - - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - - err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, - MLX5E_TC_INT_PORT_INGRESS, - &action, esw_attr->out_count); - if (err) - return err; - - esw_attr->out_count++; - - break; - } - case FLOW_ACTION_REDIRECT: - case FLOW_ACTION_MIRRED: { - struct mlx5e_priv *out_priv; - struct net_device *out_dev; - - out_dev = act->dev; - if (!out_dev) { - /* out_dev is NULL when filters with - * 
non-existing mirred device are replayed to - * the driver. - */ - return -EINVAL; - } - - if (mpls_push && !netif_is_bareudp(out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "mpls is supported only through a bareudp device"); - return -EOPNOTSUPP; - } - - if (ft_flow && out_dev == priv->netdev) { - /* Ignore forward to self rules generated - * by adding both mlx5 devs to the flow table - * block on a normal nft offload setup. - */ - return -EOPNOTSUPP; - } - - if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { - NL_SET_ERR_MSG_MOD(extack, - "can't support more output ports, can't offload forwarding"); - netdev_warn(priv->netdev, - "can't support more than %d output ports, can't offload forwarding\n", - esw_attr->out_count); - return -EOPNOTSUPP; - } - - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - if (encap) { - parse_attr->mirred_ifindex[esw_attr->out_count] = - out_dev->ifindex; - parse_attr->tun_info[esw_attr->out_count] = - mlx5e_dup_tun_info(info); - if (!parse_attr->tun_info[esw_attr->out_count]) - return -ENOMEM; - encap = false; - esw_attr->dests[esw_attr->out_count].flags |= - MLX5_ESW_DEST_ENCAP; - esw_attr->out_count++; - /* attr->dests[].rep is resolved when we - * handle encap - */ - } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - - if (is_duplicated_output_device(priv->netdev, - out_dev, - ifindexes, - if_count, - extack)) - return -EOPNOTSUPP; - - ifindexes[if_count] = out_dev->ifindex; - if_count++; - - out_dev = get_fdb_out_dev(uplink_dev, out_dev); - if (!out_dev) - return -ENODEV; - - if (is_vlan_dev(out_dev)) { - err = add_vlan_push_action(priv, attr, - &out_dev, - &action, extack); - if (err) - return err; - } - - if (is_vlan_dev(parse_attr->filter_dev)) { - err = add_vlan_pop_action(priv, attr, - &action, extack); - if (err) - return err; - } - - if (netif_is_macvlan(out_dev)) - out_dev = macvlan_dev_real_dev(out_dev); - - err = verify_uplink_forwarding(priv, flow, out_dev, extack); - if (err) - return err; - - if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "devices are not on same switch HW, can't offload forwarding"); - return -EOPNOTSUPP; - } - - if (same_vf_reps(priv, out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "can't forward from a VF to itself"); - return -EOPNOTSUPP; - } - - out_priv = netdev_priv(out_dev); - rpriv = out_priv->ppriv; - esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; - esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; - esw_attr->out_count++; - } else if (netif_is_ovs_master(out_dev)) { - err = mlx5e_set_fwd_to_int_port_actions(priv, attr, - out_dev->ifindex, - MLX5E_TC_INT_PORT_EGRESS, - &action, - esw_attr->out_count); - if (err) - return err; - - esw_attr->out_count++; - } else if (parse_attr->filter_dev != priv->netdev) { - /* All mlx5 devices are called to configure - * high level device filters. 
Therefore, the - * *attempt* to install a filter on invalid - * eswitch should not trigger an explicit error - */ - return -EINVAL; - } else { - NL_SET_ERR_MSG_MOD(extack, - "devices are not on same switch HW, can't offload forwarding"); - netdev_warn(priv->netdev, - "devices %s %s not on same switch HW, can't offload forwarding\n", - priv->netdev->name, - out_dev->name); - return -EOPNOTSUPP; - } - } - break; - case FLOW_ACTION_TUNNEL_ENCAP: - info = act->tunnel; - if (info) { - encap = true; - } else { - NL_SET_ERR_MSG_MOD(extack, - "Zero tunnel attributes is not supported"); - return -EOPNOTSUPP; - } - - break; - case FLOW_ACTION_VLAN_PUSH: - case FLOW_ACTION_VLAN_POP: - if (act->id == FLOW_ACTION_VLAN_PUSH && - (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) { - /* Replace vlan pop+push with vlan modify */ - action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - err = add_vlan_rewrite_action(priv, - MLX5_FLOW_NAMESPACE_FDB, - act, parse_attr, hdrs, - &action, extack); - } else { - err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack); - } - if (err) - return err; - - esw_attr->split_count = esw_attr->out_count; - break; - case FLOW_ACTION_VLAN_MANGLE: - err = add_vlan_rewrite_action(priv, - MLX5_FLOW_NAMESPACE_FDB, - act, parse_attr, hdrs, - &action, extack); - if (err) - return err; - - esw_attr->split_count = esw_attr->out_count; - break; - case FLOW_ACTION_TUNNEL_DECAP: - decap = true; - break; - case FLOW_ACTION_GOTO: - err = validate_goto_chain(priv, flow, act, action, - extack); - if (err) - return err; - - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - attr->dest_chain = act->chain_index; - break; - case FLOW_ACTION_CT: - if (flow_flag_test(flow, SAMPLE)) { - NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); - return -EOPNOTSUPP; - } - err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, - &parse_attr->mod_hdr_acts, - act, extack); - if (err) - return err; - - flow_flag_set(flow, CT); - esw_attr->split_count = esw_attr->out_count; - break; - case FLOW_ACTION_SAMPLE: - if (flow_flag_test(flow, CT)) { - NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); - return -EOPNOTSUPP; - } - sample_attr.rate = act->sample.rate; - sample_attr.group_num = act->sample.psample_group->group_num; - if (act->sample.truncate) - sample_attr.trunc_size = act->sample.trunc_size; - flow_flag_set(flow, SAMPLE); - break; - default: - NL_SET_ERR_MSG_MOD(extack, - "The offload action is not supported in FDB action"); - return -EOPNOTSUPP; - } - } + err = parse_tc_actions(parse_state, flow_action); + if (err) + return err; /* Forward to/from internal port can only have 1 dest */ if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) && @@ -4254,23 +3372,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - /* always set IP version for indirect table handling */ - attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); - - if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && - action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { - /* For prio tag mode, replace vlan pop with rewrite vlan prio - * tag rewrite. 
- */ - action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs, - &action, extack); - if (err) - return err; - } - - attr->action = action; - err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); if (err) return err; @@ -4278,30 +3379,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; - if (attr->dest_chain && decap) { - /* It can be supported if we'll create a mapping for - * the tunnel device only (without tunnel), and set - * this tunnel id with this decap flow. - * - * On restore (miss), we'll just set this saved tunnel - * device. - */ - - NL_SET_ERR_MSG(extack, "Decap with goto isn't supported"); - netdev_warn(priv->netdev, "Decap with goto isn't supported"); - return -EOPNOTSUPP; - } - - /* Allocate sample attribute only when there is a sample action and - * no errors after parsing. - */ - if (flow_flag_test(flow, SAMPLE)) { - attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL); - if (!attr->sample_attr) - return -ENOMEM; - *attr->sample_attr = sample_attr; - } - return 0; } @@ -4398,7 +3475,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, flow->cookie = f->cookie; flow->priv = priv; - attr = mlx5_alloc_flow_attr(get_flow_name_space(flow)); + attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow)); if (!attr) goto err_free; @@ -4493,6 +3570,9 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; + /* always set IP version for indirect table handling */ + flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); + err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); if (err) goto err_free; @@ -4997,7 +4077,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, u16 peer_vhca_id; int bkt; - if (!same_hw_devs(priv, peer_priv)) + if (!mlx5e_same_hw_devs(priv, peer_priv)) return; peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index eb042f0f5a41..5ffae9b13066 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -151,7 +151,6 @@ enum { int mlx5e_tc_esw_init(struct rhashtable *tc_ht); void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); -bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow); int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct flow_cls_offload *f, unsigned long flags); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index ff0a07a91992..11bbcd5f5b8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, /* If vports min rate divider is 0 but their group has bw_share configured, then * need to set bw_share for vports to minimal value. 
*/ - if (!group_level && !max_guarantee && group->bw_share) + if (!group_level && !max_guarantee && group && group->bw_share) return 1; return 0; } @@ -419,7 +419,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw, return err; /* Recalculate bw share weights of old and new groups */ - if (vport->qos.bw_share) { + if (vport->qos.bw_share || new_group->bw_share) { esw_qos_normalize_vports_min_rate(esw, curr_group, extack); esw_qos_normalize_vports_min_rate(esw, new_group, extack); } @@ -590,6 +590,7 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta if (IS_ERR(esw->qos.group0)) { esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n", PTR_ERR(esw->qos.group0)); + err = PTR_ERR(esw->qos.group0); goto err_group0; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 98de91fc3d99..3bd5ae6ecd04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -329,14 +329,25 @@ static bool esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + bool result = false; int i; - for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + /* Indirect table is supported only for flows with in_port uplink + * and the destination is vport on the same eswitch as the uplink, + * return false in case at least one of destinations doesn't meet + * this criteria. + */ + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { if (esw_attr->dests[i].rep && mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, - esw_attr->dests[i].mdev)) - return true; - return false; + esw_attr->dests[i].mdev)) { + result = true; + } else { + result = false; + break; + } + } + return result; } static int @@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master, struct mlx5_eswitch *esw = master->priv.eswitch; struct mlx5_flow_table_attr ft_attr = { .max_fte = 1, .prio = 0, .level = 0, + .flags = MLX5_FLOW_TABLE_OTHER_VPORT, }; struct mlx5_flow_namespace *egress_ns; struct mlx5_flow_table *acl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 7e0e04cf26f8..b406e0367af6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -38,9 +38,10 @@ #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) +#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) -#define MLX5_SF_NUM_COUNTERS_BULK 8 +#define MLX5_INIT_COUNTERS_BULK 8 #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) #define MLX5_FC_POOL_USED_BUFF_RATIO 10 @@ -145,13 +146,15 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, spin_unlock(&fc_stats->counters_idr_lock); } -static int get_max_bulk_query_len(struct mlx5_core_dev *dev) +static int get_init_bulk_query_len(struct mlx5_core_dev *dev) { - int num_counters_bulk = mlx5_core_is_sf(dev) ? 
- MLX5_SF_NUM_COUNTERS_BULK : - MLX5_SW_MAX_COUNTERS_BULK; + return min_t(int, MLX5_INIT_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +} - return min_t(int, num_counters_bulk, +static int get_max_bulk_query_len(struct mlx5_core_dev *dev) +{ + return min_t(int, MLX5_SW_MAX_COUNTERS_BULK, (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); } @@ -177,7 +180,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; bool query_more_counters = (first->id <= last_id); - int max_bulk_len = get_max_bulk_query_len(dev); + int cur_bulk_len = fc_stats->bulk_query_len; u32 *data = fc_stats->bulk_query_out; struct mlx5_fc *counter = first; u32 bulk_base_id; @@ -189,7 +192,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, bulk_base_id = counter->id & ~0x3; /* number of counters to query inc. the last counter */ - bulk_len = min_t(int, max_bulk_len, + bulk_len = min_t(int, cur_bulk_len, ALIGN(last_id - bulk_base_id + 1, 4)); err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, @@ -230,6 +233,41 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) mlx5_fc_free(dev, counter); } +static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int max_bulk_len = get_max_bulk_query_len(dev); + unsigned long now = jiffies; + u32 *bulk_query_out_tmp; + int max_out_len; + + if (fc_stats->bulk_query_alloc_failed && + time_before(now, fc_stats->next_bulk_query_alloc)) + return; + + max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); + bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL); + if (!bulk_query_out_tmp) { + mlx5_core_warn_once(dev, + "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n", + max_bulk_len); + fc_stats->bulk_query_alloc_failed = true; + fc_stats->next_bulk_query_alloc = + now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD; + return; + } + + kfree(fc_stats->bulk_query_out); + fc_stats->bulk_query_out = bulk_query_out_tmp; + fc_stats->bulk_query_len = max_bulk_len; + if (fc_stats->bulk_query_alloc_failed) { + mlx5_core_info(dev, + "Flow counters bulk query buffer size increased, bulk_size(%d)\n", + max_bulk_len); + fc_stats->bulk_query_alloc_failed = false; + } +} + static void mlx5_fc_stats_work(struct work_struct *work) { struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, @@ -247,15 +285,22 @@ static void mlx5_fc_stats_work(struct work_struct *work) queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval); - llist_for_each_entry(counter, addlist, addlist) + llist_for_each_entry(counter, addlist, addlist) { mlx5_fc_stats_insert(dev, counter); + fc_stats->num_counters++; + } llist_for_each_entry_safe(counter, tmp, dellist, dellist) { mlx5_fc_stats_remove(dev, counter); mlx5_fc_release(dev, counter); + fc_stats->num_counters--; } + if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) && + fc_stats->num_counters > get_init_bulk_query_len(dev)) + mlx5_fc_stats_bulk_query_size_increase(dev); + if (time_before(now, fc_stats->next_query) || list_empty(&fc_stats->counters)) return; @@ -378,8 +423,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy); int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - int max_bulk_len; - int max_out_len; + int init_bulk_len; + int init_out_len; spin_lock_init(&fc_stats->counters_idr_lock); 
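[Editor's note] The fs_counters hunks above change the bulk-query buffer from a fixed-size allocation at init time to a small initial buffer (MLX5_INIT_COUNTERS_BULK, 8 counters) that is grown to the 32K maximum only once the tracked counter count exceeds the initial bulk length; if the large allocation fails, the driver keeps the small buffer and retries no sooner than MLX5_FC_BULK_QUERY_ALLOC_PERIOD (180 s) later. Below is a standalone sketch of that grow-with-backoff policy, not part of the patch; plain libc stands in for kzalloc/jiffies and the mlx5 types.

/* Editor's sketch (not part of the patch): grow-with-backoff policy for the
 * flow-counter bulk-query buffer.  The constants mirror
 * MLX5_INIT_COUNTERS_BULK, MLX5_SW_MAX_COUNTERS_BULK and
 * MLX5_FC_BULK_QUERY_ALLOC_PERIOD from the hunks above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define INIT_BULK_LEN  8
#define MAX_BULK_LEN   (1 << 15)
#define RETRY_PERIOD_S 180

struct fc_stats {
	unsigned int *bulk_out;  /* bulk-query output buffer */
	int bulk_len;            /* counters fetched per query */
	int num_counters;        /* counters currently tracked */
	bool alloc_failed;       /* last grow attempt failed */
	time_t next_alloc;       /* earliest time to retry after a failure */
};

static void maybe_grow_bulk_buffer(struct fc_stats *st)
{
	time_t now = time(NULL);
	unsigned int *tmp;

	/* Grow only after the counter count outgrows the initial bulk,
	 * and only once: bulk_len never shrinks.
	 */
	if (st->bulk_len >= MAX_BULK_LEN || st->num_counters <= INIT_BULK_LEN)
		return;
	if (st->alloc_failed && now < st->next_alloc)
		return;  /* back off between failed large allocations */

	tmp = calloc(MAX_BULK_LEN, sizeof(*tmp));
	if (!tmp) {
		/* Keep querying with the small buffer; retry later. */
		st->alloc_failed = true;
		st->next_alloc = now + RETRY_PERIOD_S;
		return;
	}
	free(st->bulk_out);
	st->bulk_out = tmp;
	st->bulk_len = MAX_BULK_LEN;
	st->alloc_failed = false;
}

int main(void)
{
	struct fc_stats st = {
		.bulk_out = calloc(INIT_BULK_LEN, sizeof(unsigned int)),
		.bulk_len = INIT_BULK_LEN,
		.num_counters = 1000,  /* enough counters to trigger a grow */
	};

	maybe_grow_bulk_buffer(&st);
	printf("bulk_len = %d\n", st.bulk_len);  /* 32768 once grown */
	free(st.bulk_out);
	return 0;
}

Starting small replaces the previous SF-only special case (MLX5_SF_NUM_COUNTERS_BULK) with one policy for all functions: every device begins with an 8-counter buffer and grows it only under real load.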
idr_init(&fc_stats->counters_idr); @@ -387,11 +432,12 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) init_llist_head(&fc_stats->addlist); init_llist_head(&fc_stats->dellist); - max_bulk_len = get_max_bulk_query_len(dev); - max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); - fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL); + init_bulk_len = get_init_bulk_query_len(dev); + init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len); + fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL); if (!fc_stats->bulk_query_out) return -ENOMEM; + fc_stats->bulk_query_len = init_bulk_len; fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 75121bc1eaa5..737df402c927 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -840,6 +840,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms); add_timer(&health->timer); + + if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc)) + queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) @@ -907,8 +910,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev) INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work); INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work); INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update); - if (mlx5_core_is_pf(dev)) - queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index ea1efdecc88c..051b20ec7bdb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -110,7 +110,7 @@ void mlx5i_cleanup(struct mlx5e_priv *priv) static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) { - struct mlx5e_sw_stats s = { 0 }; + struct rtnl_link_stats64 s = {}; int i, j; for (i = 0; i < priv->stats_nch; i++) { @@ -128,11 +128,17 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) s.tx_packets += sq_stats->packets; s.tx_bytes += sq_stats->bytes; - s.tx_queue_dropped += sq_stats->dropped; + s.tx_dropped += sq_stats->dropped; } } - memcpy(&priv->stats.sw, &s, sizeof(s)); + memset(&priv->stats.sw, 0, sizeof(s)); + + priv->stats.sw.rx_packets = s.rx_packets; + priv->stats.sw.rx_bytes = s.rx_bytes; + priv->stats.sw.tx_packets = s.tx_packets; + priv->stats.sw.tx_bytes = s.tx_bytes; + priv->stats.sw.tx_queue_dropped = s.tx_dropped; } void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index ad63dd45c8fb..a6592f9c3c05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) if (port_sel->tunnel) mlx5_destroy_ttc_table(port_sel->inner.ttc); mlx5_lag_destroy_definers(ldev); + memset(port_sel, 0, sizeof(*port_sel)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c index 0dd96a6b140d..c1df0d3595d8 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c @@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type dev->timeouts->to[type] = val; } -static void tout_set_def_val(struct mlx5_core_dev *dev) +void mlx5_tout_set_def_val(struct mlx5_core_dev *dev) { int i; - for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++) + for (i = 0; i < MAX_TIMEOUT_TYPES; i++) tout_set(dev, tout_def_sw_val[i], i); } @@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev) if (!dev->timeouts) return -ENOMEM; - tout_set_def_val(dev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h index 31faa5c17aa9..1c42ead782fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h @@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev); void mlx5_tout_cleanup(struct mlx5_core_dev *dev); void mlx5_tout_query_iseg(struct mlx5_core_dev *dev); int mlx5_tout_query_dtor(struct mlx5_core_dev *dev); +void mlx5_tout_set_def_val(struct mlx5_core_dev *dev); u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type); #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a92a92a52346..d97c9e86d7b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) if (mlx5_core_is_pf(dev)) pcie_print_link_status(dev->pdev); - err = mlx5_tout_init(dev); - if (err) { - mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); - return err; - } + mlx5_tout_set_def_val(dev); /* wait for firmware to accept initialization segments configurations */ @@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) if (err) { mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n", mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); - goto err_tout_cleanup; + return err; } err = mlx5_cmd_init(dev); if (err) { mlx5_core_err(dev, "Failed initializing command interface, aborting\n"); - goto err_tout_cleanup; + return err; } mlx5_tout_query_iseg(dev); @@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) mlx5_set_driver_version(dev); - mlx5_start_health_poll(dev); - err = mlx5_query_hca_caps(dev); if (err) { mlx5_core_err(dev, "query hca failed\n"); - goto stop_health; + goto reclaim_boot_pages; } + mlx5_start_health_poll(dev); + return 0; -stop_health: - mlx5_stop_health_poll(dev, boot); reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); err_disable_hca: @@ -1094,8 +1088,6 @@ err_disable_hca: err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); -err_tout_cleanup: - mlx5_tout_cleanup(dev); return err; } @@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) mlx5_core_disable_hca(dev, 0); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); - mlx5_tout_cleanup(dev); return 0; } @@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); + err = mlx5_tout_init(dev); + if (err) { + mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); + 
goto err_timeout_init; + } + err = mlx5_health_init(dev); if (err) goto err_health_init; @@ -1501,6 +1498,8 @@ err_adev_init: err_pagealloc_init: mlx5_health_cleanup(dev); err_health_init: + mlx5_tout_cleanup(dev); +err_timeout_init: debugfs_remove(dev->priv.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); @@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); + mlx5_tout_cleanup(dev); debugfs_remove_recursive(dev->priv.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); @@ -1604,12 +1604,28 @@ static void remove_one(struct pci_dev *pdev) mlx5_devlink_free(devlink); } +#define mlx5_pci_trace(dev, fmt, ...) ({ \ + struct mlx5_core_dev *__dev = (dev); \ + mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \ + __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \ + __dev->pci_status, ##__VA_ARGS__); \ +}) + +static const char *result2str(enum pci_ers_result result) +{ + return result == PCI_ERS_RESULT_NEED_RESET ? "need reset" : + result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" : + result == PCI_ERS_RESULT_RECOVERED ? "recovered" : + "unknown"; +} + static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + enum pci_ers_result res; - mlx5_core_info(dev, "%s was called\n", __func__); + mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state); mlx5_enter_error_state(dev, false); mlx5_error_sw_reset(dev); @@ -1617,8 +1633,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); - return state == pci_channel_io_perm_failure ? + res = state == pci_channel_io_perm_failure ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; + + mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res)); + return res; } /* wait for the device to show vital signs by waiting @@ -1652,28 +1671,34 @@ static int wait_vital(struct pci_dev *pdev) static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) { + enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT; struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err; - mlx5_core_info(dev, "%s was called\n", __func__); + mlx5_pci_trace(dev, "Enter\n"); err = mlx5_pci_enable_device(dev); if (err) { mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n", __func__, err); - return PCI_ERS_RESULT_DISCONNECT; + goto out; } pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); - if (wait_vital(pdev)) { - mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__); - return PCI_ERS_RESULT_DISCONNECT; + err = wait_vital(pdev); + if (err) { + mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n", + __func__, err); + goto out; } - return PCI_ERS_RESULT_RECOVERED; + res = PCI_ERS_RESULT_RECOVERED; +out: + mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res)); + return res; } static void mlx5_pci_resume(struct pci_dev *pdev) @@ -1681,14 +1706,12 @@ static void mlx5_pci_resume(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err; - mlx5_core_info(dev, "%s was called\n", __func__); + mlx5_pci_trace(dev, "Enter, loading driver..\n"); err = mlx5_load_one(dev); - if (err) - mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n", - __func__, err); - else - mlx5_core_info(dev, "%s: device recovered\n", __func__); + + mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err, + !err ? 
"recovered" : "Failed"); } static const struct pci_error_handlers mlx5_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index 252d6017387d..17aa348989cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -247,7 +247,7 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) { struct mlx5_sf_hw_table *table; u16 max_ext_fn = 0; - u16 ext_base_id; + u16 ext_base_id = 0; u16 max_fn = 0; u16 base_id; int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 0d1f08bbf631..7e89d5980d5e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -47,7 +47,7 @@ static struct workqueue_struct *mlxsw_owq; struct mlxsw_core_port { struct devlink_port devlink_port; void *port_driver_priv; - u8 local_port; + u16 local_port; }; void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) @@ -77,7 +77,7 @@ struct mlxsw_core { bool enable_string_tlv; } emad; struct { - u8 *mapping; /* lag_id+port_index to local_port mapping */ + u16 *mapping; /* lag_id+port_index to local_port mapping */ } lag; struct mlxsw_res res; struct mlxsw_hwmon *hwmon; @@ -718,7 +718,7 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, } /* called with rcu read lock held */ -static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, +static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port, void *priv) { struct mlxsw_core *mlxsw_core = priv; @@ -1959,7 +1959,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) { - alloc_size = sizeof(u8) * + alloc_size = sizeof(*mlxsw_core->lag.mapping) * MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) * MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); @@ -2130,7 +2130,7 @@ int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, EXPORT_SYMBOL(mlxsw_core_skb_transmit); void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { if (mlxsw_core->driver->ptp_transmitted) mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb, @@ -2208,7 +2208,7 @@ mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core, rxl_item->enabled = enabled; } -static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port, +static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port, void *priv) { struct mlxsw_event_listener_item *event_listener_item = priv; @@ -2641,7 +2641,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, { struct mlxsw_rx_listener_item *rxl_item; const struct mlxsw_rx_listener *rxl; - u8 local_port; + u16 local_port; bool found = false; if (rx_info->is_lag) { @@ -2699,7 +2699,7 @@ static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, } void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index, u8 local_port) + u16 lag_id, u8 port_index, u16 local_port) { int index = mlxsw_core_lag_mapping_index(mlxsw_core, lag_id, port_index); @@ -2708,8 +2708,8 @@ void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_set); -u8 
mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index) +u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index) { int index = mlxsw_core_lag_mapping_index(mlxsw_core, lag_id, port_index); @@ -2719,7 +2719,7 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, EXPORT_SYMBOL(mlxsw_core_lag_mapping_get); void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 local_port) + u16 lag_id, u16 local_port) { int i; @@ -2747,7 +2747,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_res_get); -static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, +static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, enum devlink_port_flavour flavour, u32 port_number, bool split, u32 split_port_subnumber, @@ -2778,7 +2778,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, return err; } -static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) +static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; @@ -2788,7 +2788,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); } -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, @@ -2810,7 +2810,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, } EXPORT_SYMBOL(mlxsw_core_port_init); -void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) +void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) { atomic_dec(&mlxsw_core->active_ports_count); @@ -2845,7 +2845,7 @@ void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core) } EXPORT_SYMBOL(mlxsw_core_cpu_port_fini); -void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv, struct net_device *dev) { struct mlxsw_core_port *mlxsw_core_port = @@ -2857,7 +2857,7 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, } EXPORT_SYMBOL(mlxsw_core_port_eth_set); -void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv) { struct mlxsw_core_port *mlxsw_core_port = @@ -2869,7 +2869,7 @@ void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, } EXPORT_SYMBOL(mlxsw_core_port_ib_set); -void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv) { struct mlxsw_core_port *mlxsw_core_port = @@ -2882,7 +2882,7 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, EXPORT_SYMBOL(mlxsw_core_port_clear); enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, - u8 local_port) + u16 local_port) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; @@ -2895,7 +2895,7 @@ EXPORT_SYMBOL(mlxsw_core_port_type_get); struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, - u8 local_port) + u16 local_port) { struct 
mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; @@ -2905,7 +2905,7 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); -bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port) +bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port) { const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; int i; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 12023a550007..f30bb8614e69 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -54,7 +54,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload); struct mlxsw_tx_info { - u8 local_port; + u16 local_port; bool is_emad; }; @@ -67,7 +67,7 @@ struct mlxsw_rx_md_info { u16 tx_sys_port; u16 tx_lag_id; }; - u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */ + u16 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */ u8 tx_tc; u8 latency_valid:1, tx_congestion_valid:1, @@ -82,11 +82,11 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port); + struct sk_buff *skb, u16 local_port); struct mlxsw_rx_listener { - void (*func)(struct sk_buff *skb, u8 local_port, void *priv); - u8 local_port; + void (*func)(struct sk_buff *skb, u16 local_port, void *priv); + u16 local_port; u8 mirror_reason; u16 trap_id; }; @@ -209,7 +209,7 @@ struct mlxsw_rx_info { u16 sys_port; u16 lag_id; } u; - u8 lag_port_index; + u16 lag_port_index; u8 mirror_reason; int trap_id; }; @@ -218,36 +218,36 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, struct mlxsw_rx_info *rx_info); void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index, u8 local_port); -u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index); + u16 lag_id, u8 port_index, u16 local_port); +u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index); void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 local_port); + u16 lag_id, u16 local_port); void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len); -void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); +void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port); int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, void *port_driver_priv, const unsigned char *switch_id, unsigned char switch_id_len); void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core); -void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv, struct net_device *dev); -void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_ib_set(struct 
mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv); -void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv); enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, - u8 local_port); + u16 local_port); struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, - u8 local_port); -bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port); + u16 local_port); +bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); @@ -316,11 +316,11 @@ struct mlxsw_driver { struct netlink_ext_ack *extack); void (*fini)(struct mlxsw_core *mlxsw_core); int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core); - int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port, + int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port, enum devlink_port_type new_type); - int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, + int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port, unsigned int count, struct netlink_ext_ack *extack); - int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port, + int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port, struct netlink_ext_ack *extack); int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, @@ -394,7 +394,7 @@ struct mlxsw_driver { * is responsible for freeing the passed-in SKB. */ void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port); + struct sk_buff *skb, u16 local_port); u8 txhdr_len; const struct mlxsw_config_profile *profile; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 78d9c0196f2b..77e82e6cf6e8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -113,7 +113,7 @@ static const struct rhashtable_params mlxsw_afa_set_ht_params = { }; struct mlxsw_afa_fwd_entry_ht_key { - u8 local_port; + u16 local_port; }; struct mlxsw_afa_fwd_entry { @@ -555,7 +555,7 @@ int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block) EXPORT_SYMBOL(mlxsw_afa_block_terminate); static struct mlxsw_afa_fwd_entry * -mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) +mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port) { struct mlxsw_afa_fwd_entry *fwd_entry; int err; @@ -598,7 +598,7 @@ static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa, } static struct mlxsw_afa_fwd_entry * -mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port) +mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port) { struct mlxsw_afa_fwd_entry_ht_key ht_key = {0}; struct mlxsw_afa_fwd_entry *fwd_entry; @@ -647,7 +647,7 @@ mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block, } static struct mlxsw_afa_fwd_entry_ref * -mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) +mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u16 local_port) { struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; struct mlxsw_afa_fwd_entry *fwd_entry; @@ -1352,7 +1352,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward); struct 
mlxsw_afa_mirror { struct mlxsw_afa_resource resource; int span_id; - u8 local_in_port; + u16 local_in_port; bool ingress; }; @@ -1379,7 +1379,7 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block, } static struct mlxsw_afa_mirror * -mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port, +mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u16 local_in_port, const struct net_device *out_dev, bool ingress) { struct mlxsw_afa_mirror *mirror; @@ -1423,7 +1423,7 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, } int -mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, +mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u16 local_in_port, const struct net_device *out_dev, bool ingress, struct netlink_ext_ack *extack) { @@ -1663,7 +1663,7 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, } int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port, + u16 local_port, bool in_port, struct netlink_ext_ack *extack) { struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; @@ -2038,7 +2038,7 @@ static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate) struct mlxsw_afa_sampler { struct mlxsw_afa_resource resource; int span_id; - u8 local_port; + u16 local_port; bool ingress; }; @@ -2061,7 +2061,7 @@ static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block, } static struct mlxsw_afa_sampler * -mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port, +mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, struct netlink_ext_ack *extack) @@ -2104,7 +2104,7 @@ mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block, return 0; } -int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index b65bf98eb5ab..16cbd6acbb01 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -17,24 +17,24 @@ struct mlxsw_afa_ops { void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index, bool *activity); - int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); + int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u16 local_port); void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); int (*counter_index_get)(void *priv, unsigned int *p_counter_index); void (*counter_index_put)(void *priv, unsigned int counter_index); - int (*mirror_add)(void *priv, u8 local_in_port, + int (*mirror_add)(void *priv, u16 local_in_port, const struct net_device *out_dev, bool ingress, int *p_span_id); - void (*mirror_del)(void *priv, u8 local_in_port, int span_id, + void (*mirror_del)(void *priv, u16 local_in_port, int span_id, bool ingress); int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst, u16 *p_policer_index, struct netlink_ext_ack *extack); void (*policer_del)(void *priv, u16 policer_index); - int (*sampler_add)(void *priv, u8 local_port, + int (*sampler_add)(void *priv, u16 local_port, struct psample_group *psample_group, u32 
rate, u32 trunc_size, bool truncate, bool ingress, int *p_span_id, struct netlink_ext_ack *extack); - void (*sampler_del)(void *priv, u8 local_port, int span_id, + void (*sampler_del)(void *priv, u16 local_port, int span_id, bool ingress); bool dummy_first_set; }; @@ -62,12 +62,12 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, - u8 local_in_port, + u16 local_in_port, const struct net_device *out_dev, bool ingress, struct netlink_ext_ack *extack); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port, + u16 local_port, bool in_port, struct netlink_ext_ack *extack); int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, u16 vid, u8 pcp, u8 et, @@ -98,7 +98,7 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block, u32 fa_index, u64 rate_bytes_ps, u32 burst, u16 *p_policer_index, struct netlink_ext_ack *extack); -int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index ab70a873a01a..cfafbeb42586 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -367,6 +367,42 @@ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } +#define LOCAL_PORT_LSB_SIZE 8 +#define LOCAL_PORT_MSB_SIZE 2 + +#define MLXSW_ITEM32_LP(_type, _cname, _offset1, _shift1, _offset2, _shift2) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, local_port) = { \ + .offset = _offset1, \ + .shift = _shift1, \ + .size = {.bits = LOCAL_PORT_LSB_SIZE,}, \ + .name = #_type "_" #_cname "_local_port", \ +}; \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, lp_msb) = { \ + .offset = _offset2, \ + .shift = _shift2, \ + .size = {.bits = LOCAL_PORT_MSB_SIZE,}, \ + .name = #_type "_" #_cname "_lp_msb", \ +}; \ +static inline u32 __maybe_unused \ +mlxsw_##_type##_##_cname##_local_port_get(const char *buf) \ +{ \ + u32 local_port, lp_msb; \ + \ + local_port = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, \ + local_port), 0); \ + lp_msb = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, lp_msb), \ + 0); \ + return (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port; \ +} \ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_local_port_set(char *buf, u32 val) \ +{ \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, local_port), 0, \ + val & ((1 << LOCAL_PORT_LSB_SIZE) - 1)); \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, lp_msb), 0, \ + val >> LOCAL_PORT_LSB_SIZE); \ +} + #define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ _step, _instepoffset, _norealshift) \ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 5d4dfa5ddbb5..10d13f5f9c7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -38,7 +38,7 @@ struct mlxsw_m { struct mlxsw_m_port { struct net_device *dev; struct mlxsw_m *mlxsw_m; - u8 local_port; + u16 
local_port; u8 module; }; @@ -180,7 +180,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { }; static int -mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u8 local_port, +mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port, u8 *p_module, u8 *p_width) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -214,7 +214,7 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port) } static int -mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) +mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module) { struct mlxsw_m_port *mlxsw_m_port; struct net_device *dev; @@ -277,7 +277,7 @@ err_alloc_etherdev: return err; } -static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port) +static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port) { struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port]; @@ -288,7 +288,7 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port) mlxsw_core_port_fini(mlxsw_m->core, local_port); } -static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, +static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port, u8 *last_module) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8d420eb8ade2..f748b537bdab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -69,52 +69,6 @@ MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN); */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); -/* SMID - Switch Multicast ID - * -------------------------- - * The MID record maps from a MID (Multicast ID), which is a unique identifier - * of the multicast group within the stacking domain, into a list of local - * ports into which the packet is replicated. - */ -#define MLXSW_REG_SMID_ID 0x2007 -#define MLXSW_REG_SMID_LEN 0x240 - -MLXSW_REG_DEFINE(smid, MLXSW_REG_SMID_ID, MLXSW_REG_SMID_LEN); - -/* reg_smid_swid - * Switch partition ID. - * Access: Index - */ -MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); - -/* reg_smid_mid - * Multicast identifier - global identifier that represents the multicast group - * across all devices. - * Access: Index - */ -MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); - -/* reg_smid_port - * Local port memebership (1 bit per port). - * Access: RW - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); - -/* reg_smid_port_mask - * Local port mask (1 bit per port). - * Access: W - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); - -static inline void mlxsw_reg_smid_pack(char *payload, u16 mid, - u8 port, bool set) -{ - MLXSW_REG_ZERO(smid, payload); - mlxsw_reg_smid_swid_set(payload, 0); - mlxsw_reg_smid_mid_set(payload, mid); - mlxsw_reg_smid_port_set(payload, port, set); - mlxsw_reg_smid_port_mask_set(payload, port, 1); -} - /* SSPR - Switch System Port Record Register * ----------------------------------------- * Configures the system port to local port mapping. @@ -141,7 +95,7 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1); * * Access: RW */ -MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12); /* reg_sspr_sub_port * Virtual port within the physical port. 
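The MLXSW_ITEM32_LP() macro added to item.h above is what makes the register conversions that follow mechanical: a wide local port is stored as an 8-bit LSB field plus a 2-bit MSB field at register-specific offsets, and the generated get/set helpers splice the two back together. A minimal standalone sketch of that split arithmetic, with hypothetical lp_demo_* names (this is illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

#define LP_LSB_SIZE 8 /* mirrors LOCAL_PORT_LSB_SIZE */
#define LP_MSB_SIZE 2 /* mirrors LOCAL_PORT_MSB_SIZE */

/* Split a wide local_port the way mlxsw_*_local_port_set() does. */
static void lp_demo_split(uint16_t local_port, unsigned int *lsb,
			  unsigned int *msb)
{
	*lsb = local_port & ((1 << LP_LSB_SIZE) - 1); /* low 8 bits */
	*msb = local_port >> LP_LSB_SIZE;             /* top 2 bits */
}

/* Reassemble, as mlxsw_*_local_port_get() does. */
static uint16_t lp_demo_join(unsigned int lsb, unsigned int msb)
{
	return (msb << LP_LSB_SIZE) + lsb;
}

int main(void)
{
	unsigned int lsb, msb;

	lp_demo_split(300, &lsb, &msb); /* 300 = 0x12C */
	/* prints: lsb=44 msb=1 joined=300 */
	printf("lsb=%u msb=%u joined=%u\n", lsb, msb, lp_demo_join(lsb, msb));
	return 0;
}

Since only the top two bits move into the lp_msb field, ports up to 1023 fit, which is why the callers converted below can safely take a u16 local_port.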
@@ -161,7 +115,7 @@ MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); */ MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16); -static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(sspr, payload); mlxsw_reg_sspr_m_set(payload, 1); @@ -407,7 +361,7 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, enum mlxsw_reg_sfd_rec_policy policy, const char *mac, u16 fid_vid, enum mlxsw_reg_sfd_rec_action action, - u8 local_port) + u16 local_port) { mlxsw_reg_sfd_rec_pack(payload, rec_index, MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action); @@ -417,15 +371,6 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); } -static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index, - char *mac, u16 *p_fid_vid, - u8 *p_local_port) -{ - mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); - *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); - *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index); -} - /* reg_sfd_uc_lag_sub_port * LAG sub port. * Must be 0 if multichannel VEPA is not enabled. @@ -478,15 +423,6 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index, mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id); } -static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index, - char *mac, u16 *p_vid, - u16 *p_lag_id) -{ - mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); - *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index); - *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index); -} - /* reg_sfd_mc_pgi * * Multicast port group index - index into the port group table. @@ -568,19 +504,43 @@ static inline void mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index, enum mlxsw_reg_sfd_rec_policy policy, const char *mac, u16 fid, - enum mlxsw_reg_sfd_rec_action action, u32 uip, + enum mlxsw_reg_sfd_rec_action action, enum mlxsw_reg_sfd_uc_tunnel_protocol proto) { mlxsw_reg_sfd_rec_pack(payload, rec_index, MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac, action); mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); - mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24); - mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip); mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid); mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto); } +static inline void +mlxsw_reg_sfd_uc_tunnel_pack4(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid, + enum mlxsw_reg_sfd_rec_action action, u32 uip) +{ + mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24); + mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip); + mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, policy, mac, fid, + action, + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4); +} + +static inline void +mlxsw_reg_sfd_uc_tunnel_pack6(char *payload, int rec_index, const char *mac, + u16 fid, enum mlxsw_reg_sfd_rec_action action, + u32 uip_ptr) +{ + mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip_ptr); + /* Only static policy is supported for IPv6 unicast tunnel entry. 
*/ + mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY, + mac, fid, action, + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6); +} + enum mlxsw_reg_tunnel_port { MLXSW_REG_TUNNEL_PORT_NVE, MLXSW_REG_TUNNEL_PORT_VPLS, @@ -692,7 +652,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16, static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index, char *mac, u16 *p_vid, - u8 *p_local_port) + u16 *p_local_port) { mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); @@ -781,7 +741,7 @@ MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spms, 0x00, 16, 0x00, 12); enum mlxsw_reg_spms_state { MLXSW_REG_SPMS_STATE_NO_CHANGE, @@ -800,7 +760,7 @@ enum mlxsw_reg_spms_state { */ MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2); -static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_spms_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(spms, payload); mlxsw_reg_spms_local_port_set(payload, local_port); @@ -833,7 +793,7 @@ MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1); * When tport = 1: Tunnel port. * Access: Index */ -MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvid, 0x00, 16, 0x00, 12); /* reg_spvid_sub_port * Virtual port within the physical port. @@ -868,7 +828,7 @@ MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2); */ MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12); -static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid, +static inline void mlxsw_reg_spvid_pack(char *payload, u16 local_port, u16 pvid, u8 et_vlan) { MLXSW_REG_ZERO(spvid, payload); @@ -911,7 +871,7 @@ MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvm, 0x00, 16, 0x00, 12); /* reg_spvm_sub_port * Virtual port within the physical port. @@ -959,7 +919,7 @@ MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid, MLXSW_REG_SPVM_BASE_LEN, 0, 12, MLXSW_REG_SPVM_REC_LEN, 0, false); -static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spvm_pack(char *payload, u16 local_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged) { @@ -994,7 +954,7 @@ MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN); * * Note: CPU port is not supported (all tag types are allowed). */ -MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spaft, 0x00, 16, 0x00, 12); /* reg_spaft_sub_port * Virtual port within the physical port. @@ -1021,7 +981,7 @@ MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1); */ MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1); -static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port, bool allow_untagged) { MLXSW_REG_ZERO(spaft, payload); @@ -1126,76 +1086,6 @@ mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type, mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID); } -/* SFTR - Switch Flooding Table Register - * ------------------------------------- - * The switch flooding table is used for flooding packet replication. The table - * defines a bit mask of ports for packet replication. 
- */ -#define MLXSW_REG_SFTR_ID 0x2012 -#define MLXSW_REG_SFTR_LEN 0x420 - -MLXSW_REG_DEFINE(sftr, MLXSW_REG_SFTR_ID, MLXSW_REG_SFTR_LEN); - -/* reg_sftr_swid - * Switch partition ID with which to associate the port. - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8); - -/* reg_sftr_flood_table - * Flooding table index to associate with the specific type on the specific - * switch partition. - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6); - -/* reg_sftr_index - * Index. Used as an index into the Flooding Table in case the table is - * configured to use VID / FID or FID Offset. - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16); - -/* reg_sftr_table_type - * See mlxsw_flood_table_type - * Access: RW - */ -MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3); - -/* reg_sftr_range - * Range of entries to update - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16); - -/* reg_sftr_port - * Local port membership (1 bit per port). - * Access: RW - */ -MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1); - -/* reg_sftr_cpu_port_mask - * CPU port mask (1 bit per port). - * Access: W - */ -MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1); - -static inline void mlxsw_reg_sftr_pack(char *payload, - unsigned int flood_table, - unsigned int index, - enum mlxsw_flood_table_type table_type, - unsigned int range, u8 port, bool set) -{ - MLXSW_REG_ZERO(sftr, payload); - mlxsw_reg_sftr_swid_set(payload, 0); - mlxsw_reg_sftr_flood_table_set(payload, flood_table); - mlxsw_reg_sftr_index_set(payload, index); - mlxsw_reg_sftr_table_type_set(payload, table_type); - mlxsw_reg_sftr_range_set(payload, range); - mlxsw_reg_sftr_port_set(payload, port, set); - mlxsw_reg_sftr_port_mask_set(payload, port, 1); -} - /* SFDF - Switch Filtering DB Flush * -------------------------------- * The switch filtering DB flush register is used to flush the FDB. @@ -1347,7 +1237,7 @@ MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8); MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false); static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id, - u8 local_port) + u16 local_port) { MLXSW_REG_ZERO(sldr, payload); mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST); @@ -1357,7 +1247,7 @@ static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id, } static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id, - u8 local_port) + u16 local_port) { MLXSW_REG_ZERO(sldr, payload); mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST); @@ -1397,7 +1287,7 @@ MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1); * Reserved when pp = Global Configuration * Access: Index */ -MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, slcr, 0x00, 16, 0x00, 12); enum mlxsw_reg_slcr_type { MLXSW_REG_SLCR_TYPE_CRC, /* default */ @@ -1515,7 +1405,7 @@ MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2); * Not supported for CPU port * Access: Index */ -MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, slcor, 0x00, 16, 0x00, 12); /* reg_slcor_lag_id * LAG Identifier. Index into the LAG descriptor table. 
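With SFTR and SMID dropped from reg.h in favour of the V2 variants added later in this patch, the LAG helpers here change only the local_port type; the pack-then-write calling convention is untouched. A hedged sketch of adding one (possibly greater-than-255) port to a LAG, using the signatures shown above — demo_lag_port_add() is hypothetical and error unwinding is elided:

static int demo_lag_port_add(struct mlxsw_core *mlxsw_core,
			     u16 local_port, u8 lag_id, u8 port_index)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];
	char slcor_pl[MLXSW_REG_SLCOR_LEN];
	int err;

	/* Append the port to the LAG descriptor's port list. */
	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id, local_port);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sldr), sldr_pl);
	if (err)
		return err;

	/* Bind the port as a collector at the given index. */
	mlxsw_reg_slcor_port_add_pack(slcor_pl, local_port, lag_id, port_index);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(slcor), slcor_pl);
}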
@@ -1531,7 +1421,7 @@ MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10); MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10); static inline void mlxsw_reg_slcor_pack(char *payload, - u8 local_port, u16 lag_id, + u16 local_port, u16 lag_id, enum mlxsw_reg_slcor_col col) { MLXSW_REG_ZERO(slcor, payload); @@ -1541,7 +1431,7 @@ static inline void mlxsw_reg_slcor_pack(char *payload, } static inline void mlxsw_reg_slcor_port_add_pack(char *payload, - u8 local_port, u16 lag_id, + u16 local_port, u16 lag_id, u8 port_index) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, @@ -1550,21 +1440,21 @@ static inline void mlxsw_reg_slcor_port_add_pack(char *payload, } static inline void mlxsw_reg_slcor_port_remove_pack(char *payload, - u8 local_port, u16 lag_id) + u16 local_port, u16 lag_id) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT); } static inline void mlxsw_reg_slcor_col_enable_pack(char *payload, - u8 local_port, u16 lag_id) + u16 local_port, u16 lag_id) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); } static inline void mlxsw_reg_slcor_col_disable_pack(char *payload, - u8 local_port, u16 lag_id) + u16 local_port, u16 lag_id) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); @@ -1583,7 +1473,7 @@ MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spmlr, 0x00, 16, 0x00, 12); /* reg_spmlr_sub_port * Virtual port within the physical port. @@ -1611,7 +1501,7 @@ enum mlxsw_reg_spmlr_learn_mode { */ MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2); -static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port, enum mlxsw_reg_spmlr_learn_mode mode) { MLXSW_REG_ZERO(spmlr, payload); @@ -1642,7 +1532,7 @@ MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8); * * Note: Reserved for 802.1Q FIDs. */ -MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12); enum mlxsw_reg_svfa_mt { MLXSW_REG_SVFA_MT_VID_TO_FID, @@ -1696,7 +1586,7 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8); */ MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24); -static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port, enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid, u16 vid) { @@ -1733,7 +1623,7 @@ MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1); * When tport = 1: tunnel port. * Access: Index */ -MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvtr, 0x00, 16, 0x00, 12); /* reg_spvtr_ippe * Ingress Port Prio Mode Update Enable. @@ -1803,7 +1693,7 @@ enum mlxsw_reg_spvtr_epvid_mode { MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4); static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport, - u8 local_port, + u16 local_port, enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode) { MLXSW_REG_ZERO(spvtr, payload); @@ -1828,7 +1718,7 @@ MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN); * * Note: CPU port is not supported (uses VLAN mode only). */ -MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, svpe, 0x00, 16, 0x00, 12); /* reg_svpe_vp_en * Virtual port enable. 
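The SVFA/SVPE pair follows the same pattern: only the port argument widens. As a hedged illustration of how these pack helpers are typically driven together — demo_vport_vid_map() is hypothetical, and MLXSW_REG_SVFA_MT_PORT_VID_TO_FID is assumed from the elided remainder of the svfa_mt enum:

static int demo_vport_vid_map(struct mlxsw_core *mlxsw_core,
			      u16 local_port, u16 vid, u16 fid)
{
	char svpe_pl[MLXSW_REG_SVPE_LEN];
	char svfa_pl[MLXSW_REG_SVFA_LEN];
	int err;

	/* Move the port from 802.1Q bridge mode to virtual-port mode. */
	mlxsw_reg_svpe_pack(svpe_pl, local_port, true);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(svpe), svpe_pl);
	if (err)
		return err;

	/* Install a {local_port, VID} -> FID classification entry. */
	mlxsw_reg_svfa_pack(svfa_pl, local_port,
			    MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, true, fid, vid);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);
}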
@@ -1838,7 +1728,7 @@ MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1); -static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port, bool enable) { MLXSW_REG_ZERO(svpe, payload); @@ -1948,7 +1838,7 @@ MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN); * * Note: CPU port is not supported. */ -MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvmlr, 0x00, 16, 0x00, 12); /* reg_spvmlr_num_rec * Number of records to update. @@ -1971,7 +1861,7 @@ MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN, MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); -static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spvmlr_pack(char *payload, u16 local_port, u16 vid_begin, u16 vid_end, bool learn_enable) { @@ -2009,7 +1899,7 @@ MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN); * through Rx port i and a Tx port j then port i and port j must have the * same configuration. */ -MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvc, 0x00, 16, 0x00, 12); /* reg_spvc_inner_et2 * Vlan Tag1 EtherType2 enable. @@ -2074,7 +1964,7 @@ MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1); */ MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1); -static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1, +static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1, bool et0) { MLXSW_REG_ZERO(spvc, payload); @@ -2104,7 +1994,7 @@ MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN); * Not supported to CPU port. * Access: Index */ -MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spevet, 0x00, 16, 0x00, 12); /* reg_spevet_et_vlan * Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet: @@ -2115,7 +2005,7 @@ MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2); -static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port, u8 et_vlan) { MLXSW_REG_ZERO(spevet, payload); @@ -2123,6 +2013,122 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port, mlxsw_reg_spevet_et_vlan_set(payload, et_vlan); } +/* SFTR-V2 - Switch Flooding Table Version 2 Register + * -------------------------------------------------- + * The switch flooding table is used for flooding packet replication. The table + * defines a bit mask of ports for packet replication. + */ +#define MLXSW_REG_SFTR2_ID 0x202F +#define MLXSW_REG_SFTR2_LEN 0x120 + +MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN); + +/* reg_sftr2_swid + * Switch partition ID with which to associate the port. + * Access: Index + */ +MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8); + +/* reg_sftr2_flood_table + * Flooding table index to associate with the specific type on the specific + * switch partition. + * Access: Index + */ +MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6); + +/* reg_sftr2_index + * Index. Used as an index into the Flooding Table in case the table is + * configured to use VID / FID or FID Offset. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16); + +/* reg_sftr2_table_type + * See mlxsw_flood_table_type + * Access: RW + */ +MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3); + +/* reg_sftr2_range + * Range of entries to update + * Access: Index + */ +MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16); + +/* reg_sftr2_port + * Local port membership (1 bit per port). + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1); + +/* reg_sftr2_port_mask + * Local port mask (1 bit per port). + * Access: WO + */ +MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1); + +static inline void mlxsw_reg_sftr2_pack(char *payload, + unsigned int flood_table, + unsigned int index, + enum mlxsw_flood_table_type table_type, + unsigned int range, u16 port, bool set) +{ + MLXSW_REG_ZERO(sftr2, payload); + mlxsw_reg_sftr2_swid_set(payload, 0); + mlxsw_reg_sftr2_flood_table_set(payload, flood_table); + mlxsw_reg_sftr2_index_set(payload, index); + mlxsw_reg_sftr2_table_type_set(payload, table_type); + mlxsw_reg_sftr2_range_set(payload, range); + mlxsw_reg_sftr2_port_set(payload, port, set); + mlxsw_reg_sftr2_port_mask_set(payload, port, 1); +} + +/* SMID-V2 - Switch Multicast ID Version 2 Register + * ------------------------------------------------ + * The MID record maps from a MID (Multicast ID), which is a unique identifier + * of the multicast group within the stacking domain, into a list of local + * ports into which the packet is replicated. + */ +#define MLXSW_REG_SMID2_ID 0x2034 +#define MLXSW_REG_SMID2_LEN 0x120 + +MLXSW_REG_DEFINE(smid2, MLXSW_REG_SMID2_ID, MLXSW_REG_SMID2_LEN); + +/* reg_smid2_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8); + +/* reg_smid2_mid + * Multicast identifier - global identifier that represents the multicast group + * across all devices. + * Access: Index + */ +MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16); + +/* reg_smid2_port + * Local port membership (1 bit per port). + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1); + +/* reg_smid2_port_mask + * Local port mask (1 bit per port). 
+ * Access: WO + */ +MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1); + +static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port, + bool set) +{ + MLXSW_REG_ZERO(smid2, payload); + mlxsw_reg_smid2_swid_set(payload, 0); + mlxsw_reg_smid2_mid_set(payload, mid); + mlxsw_reg_smid2_port_set(payload, port, set); + mlxsw_reg_smid2_port_mask_set(payload, port, 1); +} + /* CWTP - Congetion WRED ECN TClass Profile * ---------------------------------------- * Configures the profiles for queues of egress port and traffic class @@ -2139,7 +2145,7 @@ MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN); * Not supported for CPU port * Access: Index */ -MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8); +MLXSW_ITEM32_LP(reg, cwtp, 0x00, 16, 0x00, 12); /* reg_cwtp_traffic_class * Traffic Class to configure @@ -2173,7 +2179,7 @@ MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN, #define MLXSW_REG_CWTP_MAX_PROFILE 2 #define MLXSW_REG_CWTP_DEFAULT_PROFILE 1 -static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_cwtp_pack(char *payload, u16 local_port, u8 traffic_class) { int i; @@ -2217,7 +2223,7 @@ MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN); * Not supported for CPU port * Access: Index */ -MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8); +MLXSW_ITEM32_LP(reg, cwtpm, 0x00, 16, 0x00, 12); /* reg_cwtpm_traffic_class * Traffic Class to configure @@ -2291,7 +2297,7 @@ MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2); #define MLXSW_REG_CWTPM_RESET_PROFILE 0 -static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_cwtpm_pack(char *payload, u16 local_port, u8 traffic_class, u8 profile, bool wred, bool ecn) { @@ -2363,7 +2369,7 @@ MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3); * Local port. Not including CPU port. * Access: Index */ -MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, ppbt, 0x00, 16, 0x00, 12); /* reg_ppbt_g * group - When set, the binding is of an ACL group. When cleared, @@ -2382,7 +2388,7 @@ MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16); static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e, enum mlxsw_reg_pxbt_op op, - u8 local_port, u16 acl_info) + u16 local_port, u16 acl_info) { MLXSW_REG_ZERO(ppbt, payload); mlxsw_reg_ppbt_e_set(payload, e); @@ -3513,7 +3519,7 @@ MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN); * * Note: CPU port is supported. */ -MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qpts, 0x00, 16, 0x00, 12); enum mlxsw_reg_qpts_trust_state { MLXSW_REG_QPTS_TRUST_STATE_PCP = 1, @@ -3526,7 +3532,7 @@ enum mlxsw_reg_qpts_trust_state { */ MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3); -static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qpts_pack(char *payload, u16 local_port, enum mlxsw_reg_qpts_trust_state ts) { MLXSW_REG_ZERO(qpts, payload); @@ -3717,7 +3723,7 @@ MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN); * * Note: CPU port is not supported. */ -MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qtct, 0x00, 16, 0x00, 12); /* reg_qtct_sub_port * Virtual port within the physical port. 
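Stepping back to the flood and multicast helpers added above: SFTR2 and SMID2 keep the calling convention of the registers they replace, but their port bit arrays grow from 0x20 to 0x80 bytes, i.e. from 256 to 1024 port bits, matching the wider u16 local ports. A hedged usage sketch for SMID2 — demo_mid_port_set() is hypothetical:

static int demo_mid_port_set(struct mlxsw_core *mlxsw_core, u16 mid,
			     u16 local_port, bool member)
{
	char *smid2_pl;
	int err;

	/* SMID2 payloads are 0x120 bytes, so a heap allocation is kinder
	 * to the stack than a local array.
	 */
	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
	if (!smid2_pl)
		return -ENOMEM;

	/* Flip exactly one port bit in the MID's replication list; the
	 * pack helper also raises the matching bit in port_mask, so the
	 * other 1023 ports are left untouched by the write.
	 */
	mlxsw_reg_smid2_pack(smid2_pl, mid, local_port, member);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(smid2), smid2_pl);
	kfree(smid2_pl);
	return err;
}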
@@ -3742,7 +3748,7 @@ MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4); */ MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4); -static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qtct_pack(char *payload, u16 local_port, u8 switch_prio, u8 tclass) { MLXSW_REG_ZERO(qtct, payload); @@ -3766,7 +3772,7 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN); * * Note: CPU port is supported. */ -MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qeec, 0x00, 16, 0x00, 12); enum mlxsw_reg_qeec_hr { MLXSW_REG_QEEC_HR_PORT, @@ -3909,7 +3915,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6); #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11 -static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qeec_pack(char *payload, u16 local_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index) { @@ -3920,7 +3926,7 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, mlxsw_reg_qeec_next_element_index_set(payload, next_index); } -static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u16 local_port, bool ptps) { MLXSW_REG_ZERO(qeec, payload); @@ -3944,7 +3950,7 @@ MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN); * * Note: CPU port is supported. No support for router port. */ -MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qrwe, 0x00, 16, 0x00, 12); /* reg_qrwe_dscp * Whether to enable DSCP rewrite (default is 0, don't rewrite). @@ -3958,7 +3964,7 @@ MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1); */ MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1); -static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qrwe_pack(char *payload, u16 local_port, bool rewrite_pcp, bool rewrite_dscp) { MLXSW_REG_ZERO(qrwe, payload); @@ -3985,7 +3991,7 @@ MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN); * Local Port. Supported for data packets from CPU port. * Access: Index */ -MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qpdsm, 0x00, 16, 0x00, 12); /* reg_qpdsm_prio_entry_color0_e * Enable update of the entry for color 0 and a given port. @@ -4038,7 +4044,7 @@ MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp, MLXSW_REG_QPDSM_BASE_LEN, 8, 6, MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); -static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_qpdsm_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(qpdsm, payload); mlxsw_reg_qpdsm_local_port_set(payload, local_port); @@ -4071,7 +4077,7 @@ MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN); * Local Port. Supported for data packets from CPU port. * Access: Index */ -MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qpdp, 0x00, 16, 0x00, 12); /* reg_qpdp_switch_prio * Default port Switch Priority (default 0) @@ -4079,7 +4085,7 @@ MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4); -static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qpdp_pack(char *payload, u16 local_port, u8 switch_prio) { MLXSW_REG_ZERO(qpdp, payload); @@ -4106,7 +4112,7 @@ MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN); * Local Port. Supported for data packets from CPU port. 
* Access: Index */ -MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qpdpm, 0x00, 16, 0x00, 12); /* reg_qpdpm_dscp_e * Enable update of the specific entry. When cleared, the switch_prio and color @@ -4125,7 +4131,7 @@ MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio, MLXSW_REG_QPDPM_BASE_LEN, 0, 4, MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false); -static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_qpdpm_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(qpdpm, payload); mlxsw_reg_qpdpm_local_port_set(payload, local_port); @@ -4157,7 +4163,7 @@ MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN); * No support for CPU port. * Access: Index */ -MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qtctm, 0x00, 16, 0x00, 12); /* reg_qtctm_mc * Multicast Mode @@ -4167,7 +4173,7 @@ MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8); MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1); static inline void -mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc) +mlxsw_reg_qtctm_pack(char *payload, u16 local_port, bool mc) { MLXSW_REG_ZERO(qtctm, payload); mlxsw_reg_qtctm_local_port_set(payload, local_port); @@ -4300,7 +4306,7 @@ MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pmlp, 0x00, 16, 0x00, 12); /* reg_pmlp_width * 0 - Unmap local port. @@ -4331,7 +4337,7 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false); */ MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false); -static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pmlp_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pmlp, payload); mlxsw_reg_pmlp_local_port_set(payload, local_port); @@ -4350,7 +4356,7 @@ MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pmtu, 0x00, 16, 0x00, 12); /* reg_pmtu_max_mtu * Maximum MTU. @@ -4378,7 +4384,7 @@ MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16); */ MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16); -static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pmtu_pack(char *payload, u16 local_port, u16 new_mtu) { MLXSW_REG_ZERO(pmtu, payload); @@ -4412,7 +4418,7 @@ MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1); * Local port number. 
* Access: Index */ -MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, ptys, 0x00, 16, 0x00, 12); #define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0) #define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2) @@ -4572,7 +4578,7 @@ enum mlxsw_reg_ptys_connector_type { */ MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4); -static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ptys_eth_pack(char *payload, u16 local_port, u32 proto_admin, bool autoneg) { MLXSW_REG_ZERO(ptys, payload); @@ -4582,7 +4588,7 @@ static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg); } -static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u16 local_port, u32 proto_admin, bool autoneg) { MLXSW_REG_ZERO(ptys, payload); @@ -4624,7 +4630,7 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload, mlxsw_reg_ptys_ext_eth_proto_oper_get(payload); } -static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port, u16 proto_admin, u16 link_width) { MLXSW_REG_ZERO(ptys, payload); @@ -4672,7 +4678,7 @@ MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1); * port number, if single_base_mac = 0 then local_port is reserved * Access: RW */ -MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, ppad, 0x00, 16, 0x00, 24); /* reg_ppad_mac * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved. @@ -4682,7 +4688,7 @@ MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8); MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6); static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac, - u8 local_port) + u16 local_port) { MLXSW_REG_ZERO(ppad, payload); mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac); @@ -4711,7 +4717,7 @@ MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, paos, 0x00, 16, 0x00, 12); /* reg_paos_admin_status * Port administrative state (the desired state of the port): @@ -4756,7 +4762,7 @@ MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1); */ MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2); -static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_paos_pack(char *payload, u16 local_port, enum mlxsw_port_admin_status status) { MLXSW_REG_ZERO(paos, payload); @@ -4782,7 +4788,7 @@ MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pfcc, 0x00, 16, 0x00, 12); /* reg_pfcc_pnat * Port number access type. Determines the way local_port is interpreted: @@ -4899,7 +4905,7 @@ static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en) mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en); } -static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pfcc_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pfcc, payload); mlxsw_reg_pfcc_local_port_set(payload, local_port); @@ -4928,11 +4934,9 @@ MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8); /* reg_ppcnt_local_port * Local port number. - * 255 indicates all ports on the device, and is only allowed - * for Set() operation. 
* Access: Index */ -MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, ppcnt, 0x00, 16, 0x00, 12); /* reg_ppcnt_pnat * Port number access type: @@ -4981,6 +4985,14 @@ MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6); */ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1); +/* reg_ppcnt_lp_gl + * Local port global variable. + * 0: local_port 255 = all ports of the device. + * 1: local_port indicates local port number for all ports. + * Access: OP + */ +MLXSW_ITEM32(reg, ppcnt, lp_gl, 0x04, 30, 1); + /* reg_ppcnt_prio_tc * Priority for counter set that support per priority, valid values: 0-7. * Traffic class for counter set that support per traffic class, @@ -5404,7 +5416,7 @@ MLXSW_ITEM64(reg, ppcnt, wred_discard, MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); -static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) { @@ -5414,6 +5426,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, mlxsw_reg_ppcnt_pnat_set(payload, 0); mlxsw_reg_ppcnt_grp_set(payload, grp); mlxsw_reg_ppcnt_clr_set(payload, 0); + mlxsw_reg_ppcnt_lp_gl_set(payload, 1); mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc); } @@ -5430,7 +5443,7 @@ MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, plib, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12); /* reg_plib_ib_port * InfiniBand port remapping for local_port. @@ -5468,7 +5481,7 @@ MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pptb, 0x00, 16, 0x00, 12); /* reg_pptb_um * Enables the update of the untagged_buf field. @@ -5515,7 +5528,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4); #define MLXSW_REG_PPTB_ALL_PRIO 0xFF -static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pptb_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pptb, payload); mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM); @@ -5545,7 +5558,7 @@ MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pbmc, 0x00, 16, 0x00, 12); /* reg_pbmc_xoff_timer_value * When device generates a pause frame, it uses this value as the pause @@ -5612,7 +5625,7 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16, MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16, 0x08, 0x04, false); -static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pbmc_pack(char *payload, u16 local_port, u16 xoff_timer_value, u16 xoff_refresh) { MLXSW_REG_ZERO(pbmc, payload); @@ -5661,7 +5674,7 @@ MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pspa, 0x00, 16, 0x00, 0); /* reg_pspa_sub_port * Virtual port within the local port. 
Set to 0 when virtual ports are @@ -5670,7 +5683,7 @@ MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8); -static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) +static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u16 local_port) { MLXSW_REG_ZERO(pspa, payload); mlxsw_reg_pspa_swid_set(payload, swid); @@ -5772,7 +5785,7 @@ MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pplr, 0x00, 16, 0x00, 12); /* Phy local loopback. When set the port's egress traffic is looped back * to the receiver and the port transmitter is disabled. @@ -5785,7 +5798,7 @@ MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8); -static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pplr_pack(char *payload, u16 local_port, bool phy_local) { MLXSW_REG_ZERO(pplr, payload); @@ -5846,7 +5859,7 @@ MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4); * the module. * Access: RO */ -MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false); +MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 10, 0x02, 0x00, false); static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module, u8 ports_width, u8 num_ports) @@ -5915,7 +5928,7 @@ MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pddr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pddr, 0x00, 16, 0x00, 12); enum mlxsw_reg_pddr_page_select { MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1, @@ -5944,7 +5957,7 @@ MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16); */ MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16); -static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pddr_pack(char *payload, u16 local_port, u8 page_select) { MLXSW_REG_ZERO(pddr, payload); @@ -6014,7 +6027,7 @@ MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pllp, 0x00, 16, 0x00, 12); /* reg_pllp_label_port * Front panel label of the port. @@ -6034,7 +6047,7 @@ MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4); */ MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4); -static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pllp_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pllp, payload); mlxsw_reg_pllp_local_port_set(payload, local_port); @@ -10245,7 +10258,7 @@ MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN); * The local port to mirror the packets from. * Access: Index */ -MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mpar, 0x00, 16, 0x00, 4); enum mlxsw_reg_mpar_i_e { MLXSW_REG_MPAR_TYPE_EGRESS, @@ -10282,7 +10295,7 @@ MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4); */ MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32); -static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_mpar_pack(char *payload, u16 local_port, enum mlxsw_reg_mpar_i_e i_e, bool enable, u8 pa_id, u32 probability_rate) @@ -10386,7 +10399,7 @@ MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN); * Local port number. 
* Access: RW */ -MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mlcr, 0x00, 16, 0x00, 24); #define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF @@ -10405,7 +10418,7 @@ MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16); */ MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16); -static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_mlcr_pack(char *payload, u16 local_port, bool active) { MLXSW_REG_ZERO(mlcr, payload); @@ -10778,7 +10791,7 @@ MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN); * Not supported for CPU port * Access: Index */ -MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mpsc, 0x00, 16, 0x00, 12); /* reg_mpsc_e * Enable sampling on port local_port @@ -10795,7 +10808,7 @@ MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1); */ MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32); -static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, +static inline void mlxsw_reg_mpsc_pack(char *payload, u16 local_port, bool e, u32 rate) { MLXSW_REG_ZERO(mpsc, payload); @@ -11003,7 +11016,7 @@ MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, momte, 0x00, 16, 0x00, 12); enum mlxsw_reg_momte_type { MLXSW_REG_MOMTE_TYPE_WRED = 0x20, @@ -11030,7 +11043,7 @@ MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8); */ MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1); -static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_momte_pack(char *payload, u16 local_port, enum mlxsw_reg_momte_type type) { MLXSW_REG_ZERO(momte, payload); @@ -11098,7 +11111,7 @@ MLXSW_REG_DEFINE(mtpptr, MLXSW_REG_MTPPTR_ID, MLXSW_REG_MTPPTR_LEN); * Not supported for CPU port. * Access: Index */ -MLXSW_ITEM32(reg, mtpptr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mtpptr, 0x00, 16, 0x00, 12); enum mlxsw_reg_mtpptr_dir { MLXSW_REG_MTPPTR_DIR_INGRESS, @@ -11692,7 +11705,7 @@ MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN); * Local port number (receive port). CPU port is supported. * Access: Index */ -MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, tnqdr, 0x00, 16, 0x00, 12); /* reg_tnqdr_dscp * For encapsulation, the default DSCP. 
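MPSC is typical of the per-port mirror/sample registers in this patch: the conversion is purely a type change, so enabling sampling still packs an enable bit and a rate and writes the register. A hedged sketch, with the hypothetical demo_sampling_enable():

static int demo_sampling_enable(struct mlxsw_core *mlxsw_core,
				u16 local_port, u32 rate)
{
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	/* Sample 1 out of every 'rate' packets received on local_port. */
	mlxsw_reg_mpsc_pack(mpsc_pl, local_port, true, rate);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mpsc), mpsc_pl);
}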
@@ -11700,7 +11713,7 @@ MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6); -static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_tnqdr_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(tnqdr, payload); mlxsw_reg_tnqdr_local_port_set(payload, local_port); @@ -12028,7 +12041,7 @@ MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN); * For Egress: excludes IP Router * Access: Index */ -MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sbcm, 0x00, 16, 0x00, 4); /* reg_sbcm_pg_buff * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress) @@ -12082,7 +12095,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); */ MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); -static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, +static inline void mlxsw_reg_sbcm_pack(char *payload, u16 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, u32 min_buff, u32 max_buff, bool infi_max, u8 pool) @@ -12114,7 +12127,7 @@ MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN); * For Egress: excludes IP Router * Access: Index */ -MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sbpm, 0x00, 16, 0x00, 12); /* reg_sbpm_pool * The pool associated to quota counting on the local_port. @@ -12168,7 +12181,7 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24); */ MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); -static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, +static inline void mlxsw_reg_sbpm_pack(char *payload, u16 local_port, u8 pool, enum mlxsw_reg_sbxx_dir dir, bool clr, u32 min_buff, u32 max_buff) { @@ -12266,6 +12279,16 @@ MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN); */ MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1); +#define MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE 256 + +/* reg_sbsr_port_page + * Determines the range of the ports specified in the 'ingress_port_mask' + * and 'egress_port_mask' bit masks. + * {ingress,egress}_port_mask[x] is (256 * port_page) + x + * Access: Index + */ +MLXSW_ITEM32(reg, sbsr, port_page, 0x04, 0, 4); + /* reg_sbsr_ingress_port_mask * Bit vector for all ingress network ports. 
* Indicates which of the ports (for which the relevant bit is set) @@ -12353,7 +12376,7 @@ MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN); * Not supported for CPU port and router port * Access: Index */ -MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sbib, 0x00, 16, 0x00, 12); /* reg_sbib_buff_size * Units represented in cells @@ -12363,7 +12386,7 @@ MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24); -static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_sbib_pack(char *payload, u16 local_port, u32 buff_size) { MLXSW_REG_ZERO(sbib, payload); @@ -12374,7 +12397,6 @@ static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(sgcr), MLXSW_REG(spad), - MLXSW_REG(smid), MLXSW_REG(sspr), MLXSW_REG(sfdat), MLXSW_REG(sfd), @@ -12384,7 +12406,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvm), MLXSW_REG(spaft), MLXSW_REG(sfgc), - MLXSW_REG(sftr), MLXSW_REG(sfdf), MLXSW_REG(sldr), MLXSW_REG(slcr), @@ -12397,6 +12418,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvmlr), MLXSW_REG(spvc), MLXSW_REG(spevet), + MLXSW_REG(sftr2), + MLXSW_REG(smid2), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), MLXSW_REG(pgcr), @@ -12556,7 +12579,7 @@ MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pude, 0x00, 16, 0x00, 12); /* reg_pude_admin_status * Port administrative state (the desired state). diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f1ed7660b0b5..5251f33af0fb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -46,8 +46,8 @@ #include "spectrum_trap.h" #define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 3326 +#define MLXSW_SP1_FWREV_MINOR 2010 +#define MLXSW_SP1_FWREV_SUBMINOR 1006 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -63,8 +63,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" #define MLXSW_SP2_FWREV_MAJOR 29 -#define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 3326 +#define MLXSW_SP2_FWREV_MINOR 2010 +#define MLXSW_SP2_FWREV_SUBMINOR 1006 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@ -78,8 +78,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { "." 
__stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" #define MLXSW_SP3_FWREV_MAJOR 30 -#define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 3326 +#define MLXSW_SP3_FWREV_MINOR 2010 +#define MLXSW_SP3_FWREV_SUBMINOR 1006 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -352,7 +352,7 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) } static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 swid) + u16 local_port, u8 swid) { char pspa_pl[MLXSW_REG_PSPA_LEN]; @@ -483,7 +483,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) } static int -mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, +mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, struct mlxsw_sp_port_mapping *port_mapping) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -535,7 +535,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, } static int -mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, +mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, const struct mlxsw_sp_port_mapping *port_mapping) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -560,7 +560,7 @@ err_pmlp_write: return err; } -static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port, u8 module) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -1474,7 +1474,7 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 *port_number, + u16 local_port, u8 *port_number, u8 *split_port_subnumber, u8 *slot_index) { @@ -1490,7 +1490,7 @@ static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, return 0; } -static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, bool split, struct mlxsw_sp_port_mapping *port_mapping) { @@ -1781,7 +1781,7 @@ err_port_swid_set: return err; } -static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) +static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; u8 module = mlxsw_sp_port->mapping.module; @@ -1848,12 +1848,12 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp_port); } -static bool mlxsw_sp_local_port_valid(u8 local_port) +static bool mlxsw_sp_local_port_valid(u16 local_port) { return local_port != MLXSW_PORT_CPU_PORT; } -static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) +static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) { if (!mlxsw_sp_local_port_valid(local_port)) return false; @@ -1971,7 +1971,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, split_port_mapping = *port_mapping; split_port_mapping.width /= count; for (i = 0; i < count; i++) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (!mlxsw_sp_local_port_valid(s_local_port)) continue; @@ -1987,7 +1987,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, err_port_create: for (i--; i >= 0; i--) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) 
mlxsw_sp_port_remove(mlxsw_sp, s_local_port); @@ -2004,7 +2004,7 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, /* Go over original unsplit ports in the gap and recreate them. */ for (i = 0; i < count; i++) { - u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); port_mapping = mlxsw_sp->port_mapping[local_port]; if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) @@ -2015,14 +2015,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_port * -mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) +mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) { if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) return mlxsw_sp->ports[local_port]; return NULL; } -static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, +static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, unsigned int count, struct netlink_ext_ack *extack) { @@ -2065,7 +2065,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, port_mapping = mlxsw_sp_port->mapping; for (i = 0; i < count; i++) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) mlxsw_sp_port_remove(mlxsw_sp, s_local_port); @@ -2085,7 +2085,7 @@ err_port_split_create: return err; } -static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, +static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -2121,7 +2121,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, } for (i = 0; i < count; i++) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) mlxsw_sp_port_remove(mlxsw_sp, s_local_port); @@ -2148,7 +2148,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, struct mlxsw_sp_port *mlxsw_sp_port; enum mlxsw_reg_pude_oper_status status; unsigned int max_ports; - u8 local_port; + u16 local_port; max_ports = mlxsw_core_max_ports(mlxsw_sp->core); local_port = mlxsw_reg_pude_local_port_get(pude_pl); @@ -2174,7 +2174,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, char *mtpptr_pl, bool ingress) { - u8 local_port; + u16 local_port; u8 num_rec; int i; @@ -2212,7 +2212,7 @@ static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, } void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, - u8 local_port, void *priv) + u16 local_port, void *priv) { struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; @@ -2236,7 +2236,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, netif_receive_skb(skb); } -static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, void *priv) { skb->offload_fwd_mark = 1; @@ -2244,7 +2244,7 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, } static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, - u8 local_port, void *priv) + u16 
local_port, void *priv) { skb->offload_l3_fwd_mark = 1; skb->offload_fwd_mark = 1; @@ -2252,7 +2252,7 @@ static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, } void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port) + u16 local_port) { mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); } @@ -2755,6 +2755,140 @@ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) mutex_destroy(&mlxsw_sp->parsing.lock); } +struct mlxsw_sp_ipv6_addr_node { + struct in6_addr key; + struct rhash_head ht_node; + u32 kvdl_index; + refcount_t refcount; +}; + +static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), + .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), + .key_len = sizeof(struct in6_addr), + .automatic_shrinking = true, +}; + +static int +mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, + u32 *p_kvdl_index) +{ + struct mlxsw_sp_ipv6_addr_node *node; + char rips_pl[MLXSW_REG_RIPS_LEN]; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + p_kvdl_index); + if (err) + return err; + + mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); + if (err) + goto err_rips_write; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + err = -ENOMEM; + goto err_node_alloc; + } + + node->key = *addr6; + node->kvdl_index = *p_kvdl_index; + refcount_set(&node->refcount, 1); + + err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, + &node->ht_node, + mlxsw_sp_ipv6_addr_ht_params); + if (err) + goto err_rhashtable_insert; + + return 0; + +err_rhashtable_insert: + kfree(node); +err_node_alloc: +err_rips_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + *p_kvdl_index); + return err; +} + +static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipv6_addr_node *node) +{ + u32 kvdl_index = node->kvdl_index; + + rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, + mlxsw_sp_ipv6_addr_ht_params); + kfree(node); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + kvdl_index); +} + +int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index) +{ + struct mlxsw_sp_ipv6_addr_node *node; + int err = 0; + + mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); + node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, + mlxsw_sp_ipv6_addr_ht_params); + if (node) { + refcount_inc(&node->refcount); + *p_kvdl_index = node->kvdl_index; + goto out_unlock; + } + + err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); + +out_unlock: + mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); + return err; +} + +void +mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) +{ + struct mlxsw_sp_ipv6_addr_node *node; + + mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); + node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, + mlxsw_sp_ipv6_addr_ht_params); + if (WARN_ON(!node)) + goto out_unlock; + + if (!refcount_dec_and_test(&node->refcount)) + goto out_unlock; + + mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); + +out_unlock: + mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); +} + +static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, + &mlxsw_sp_ipv6_addr_ht_params); + if (err) + return err; + + 
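/*
 * Annotation (not part of the patch): the new mlxsw_sp_ipv6_addr_* helpers
 * above implement a lookup-or-create cache with reference counting: the first
 * user of an IPv6 underlay address allocates a KVDL entry and inserts a
 * hashtable node, later users only bump the refcount, and the last put frees
 * both. A minimal userspace model of that pattern follows; a pthread mutex
 * stands in for ipv6_addr_ht_lock, a linked list for the rhashtable, and a
 * counter for the KVDL allocator. All names below are illustrative, none of
 * this is kernel API.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct addr_node {
	char key[16];			/* IPv6 address bytes */
	unsigned int index;		/* stand-in for the KVDL index */
	unsigned int refcount;
	struct addr_node *next;
};

static struct addr_node *cache;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int next_index;

/* Return an index for @addr, creating and charging a node on first use. */
int addr_index_get(const char addr[16], unsigned int *index)
{
	struct addr_node *node;

	pthread_mutex_lock(&cache_lock);
	for (node = cache; node; node = node->next) {
		if (memcmp(node->key, addr, 16) == 0) {
			node->refcount++;	/* existing mapping: share it */
			*index = node->index;
			goto out;
		}
	}

	node = calloc(1, sizeof(*node));
	if (!node) {
		pthread_mutex_unlock(&cache_lock);
		return -1;
	}
	memcpy(node->key, addr, 16);
	node->index = next_index++;	/* fake "KVDL" allocation */
	node->refcount = 1;
	node->next = cache;
	cache = node;
	*index = node->index;
out:
	pthread_mutex_unlock(&cache_lock);
	return 0;
}

/* Drop one reference; the last put unlinks and frees the node. */
void addr_index_put(const char addr[16])
{
	struct addr_node **prev, *node;

	pthread_mutex_lock(&cache_lock);
	for (prev = &cache; (node = *prev) != NULL; prev = &node->next) {
		if (memcmp(node->key, addr, 16) != 0)
			continue;
		if (--node->refcount == 0) {
			*prev = node->next;
			free(node);
		}
		break;
	}
	pthread_mutex_unlock(&cache_lock);
}

int main(void)
{
	const char addr[16] = { 0x20, 0x01 };	/* fake 2001:: */
	unsigned int idx;

	addr_index_get(addr, &idx);	/* creates, refcount 1 */
	addr_index_get(addr, &idx);	/* shares, refcount 2 */
	addr_index_put(addr);
	addr_index_put(addr);		/* last put frees the node */
	return 0;
}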
mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); + return 0; +} + +static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) +{ + mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); + rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); +} + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct netlink_ext_ack *extack) @@ -2843,6 +2977,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_afa_init; } + err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n"); + goto err_ipv6_addr_ht_init; + } + err = mlxsw_sp_nve_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); @@ -2944,6 +3084,8 @@ err_router_init: err_acl_init: mlxsw_sp_nve_fini(mlxsw_sp); err_nve_init: + mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); +err_ipv6_addr_ht_init: mlxsw_sp_afa_fini(mlxsw_sp); err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); @@ -3075,6 +3217,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_nve_fini(mlxsw_sp); + mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -3486,7 +3629,7 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) } static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 32fdd37657dd..8445fc5c9ea3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -203,6 +203,8 @@ struct mlxsw_sp { const struct mlxsw_listener *listeners; size_t listeners_count; u32 lowest_shaper_bs; + struct rhashtable ipv6_addr_ht; + struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */ }; struct mlxsw_sp_ptp_ops { @@ -217,13 +219,13 @@ struct mlxsw_sp_ptp_ops { * is responsible for freeing the passed-in SKB. */ void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); /* Notify a driver that a timestamped packet was transmitted. Driver * is responsible for freeing the passed-in SKB. */ void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, struct hwtstamp_config *config); @@ -261,7 +263,7 @@ enum mlxsw_sp_sample_trigger_type { struct mlxsw_sp_sample_trigger { enum mlxsw_sp_sample_trigger_type type; - u8 local_port; /* Reserved when trigger type is not ingress / egress. */ + u16 local_port; /* Reserved when trigger type is not ingress / egress. 
*/ }; struct mlxsw_sp_sample_params { @@ -308,7 +310,7 @@ struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sp *mlxsw_sp; - u8 local_port; + u16 local_port; u8 lagged:1, split:1; u16 pvid; @@ -370,7 +372,7 @@ struct mlxsw_sp_port_type_speed_ops { u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width, const struct ethtool_link_ksettings *cmd); void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, bool autoneg); + u16 local_port, u32 proto_admin, bool autoneg); void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload, u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, @@ -441,7 +443,7 @@ static inline struct mlxsw_sp_port * mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) { struct mlxsw_sp_port *mlxsw_sp_port; - u8 local_port; + u16 local_port; local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core, lag_id, port_index); @@ -587,6 +589,11 @@ mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_sample_trigger *trigger); +int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index); +void +mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6); extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals; extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals; @@ -621,9 +628,9 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier; /* spectrum.c */ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, - u8 local_port, void *priv); + u16 local_port, void *priv); void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, @@ -729,7 +736,7 @@ void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); -u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); +u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, enum mlxsw_sp_l3proto ul_proto, const union mlxsw_sp_l3addr *ul_sip, @@ -1222,7 +1229,7 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid); void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid, const struct net_device *nve_dev); int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, - enum mlxsw_sp_flood_type packet_type, u8 local_port, + enum mlxsw_sp_flood_type packet_type, u16 local_port, bool member); int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); @@ -1310,6 +1317,17 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, enum mlxsw_sp_l3proto proto, union mlxsw_sp_l3addr *addr); +int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index); +void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6); +int +mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index, + const struct in6_addr *new_addr6); +void 
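/*
 * Annotation (not part of the patch): the mechanical u8 -> u16 conversions
 * of local_port that run through this whole series widen the port number
 * space, presumably in preparation for systems with more than 255 local
 * ports; with a u8, such a port number would silently wrap. Two lines make
 * the failure mode concrete:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t  old_port = (uint8_t)300;	/* wraps to 300 - 256 = 44 */
	uint16_t new_port = 300;		/* stored intact */

	printf("u8 local_port: %u, u16 local_port: %u\n", old_port, new_port);
	return 0;
}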
mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index); int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 67cedfa76f78..70c11bfac08f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -406,7 +406,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port; - u8 local_port; + u16 local_port; bool in_port; if (out_dev) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index c72aa38424dc..50806594d977 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -83,7 +83,7 @@ static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, } static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, - u8 local_port) + u16 local_port) { struct mlxsw_sp *mlxsw_sp = priv; char ppbs_pl[MLXSW_REG_PPBS_LEN]; @@ -132,7 +132,7 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index) } static int -mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, +mlxsw_sp_act_mirror_add(void *priv, u16 local_in_port, const struct net_device *out_dev, bool ingress, int *p_span_id) { @@ -159,7 +159,7 @@ err_analyzed_port_get: } static void -mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) +mlxsw_sp_act_mirror_del(void *priv, u16 local_in_port, int span_id, bool ingress) { struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = priv; @@ -192,7 +192,7 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index) policer_index); } -static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port, +static int mlxsw_sp1_act_sampler_add(void *priv, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, int *p_span_id, @@ -202,7 +202,7 @@ static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port, return -EOPNOTSUPP; } -static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id, +static void mlxsw_sp1_act_sampler_del(void *priv, u16 local_port, int span_id, bool ingress) { WARN_ON_ONCE(1); @@ -224,7 +224,7 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .sampler_del = mlxsw_sp1_act_sampler_del, }; -static int mlxsw_sp2_act_sampler_add(void *priv, u8 local_port, +static int mlxsw_sp2_act_sampler_add(void *priv, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, int *p_span_id, @@ -272,7 +272,7 @@ err_span_agent_get: return err; } -static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id, +static void mlxsw_sp2_act_sampler_del(void *priv, u16 local_port, int span_id, bool ingress) { struct mlxsw_sp_sample_trigger trigger = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index d78cf5a7220a..98f26f596e30 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -160,7 +160,7 @@ static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) 
} static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 pg_buff, + u16 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) { struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port]; @@ -173,7 +173,7 @@ static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u16 pool_index) + u16 local_port, u16 pool_index) { return &mlxsw_sp->sb->ports[local_port].pms[pool_index]; } @@ -202,7 +202,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, return 0; } -static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, u8 pg_buff, u32 min_buff, u32 max_buff, bool infi_max, u16 pool_index) { @@ -232,7 +232,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, return 0; } -static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, u16 pool_index, u32 min_buff, u32 max_buff) { const struct mlxsw_sp_sb_pool_des *des = @@ -253,7 +253,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, return 0; } -static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = @@ -279,7 +279,7 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max); } -static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = @@ -919,7 +919,7 @@ mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index) return pr->mode == MLXSW_REG_SBPR_MODE_STATIC; } -static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port, enum mlxsw_reg_sbxx_dir dir, const struct mlxsw_sp_sb_cm *cms, size_t cms_len) @@ -1037,7 +1037,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = { MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), }; -static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port, const struct mlxsw_sp_sb_pm *pms, bool skip_ingress) { @@ -1416,7 +1416,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); @@ -1432,7 +1432,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u32 max_buff; int err; @@ -1458,7 +1458,7 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port 
*mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, @@ -1479,7 +1479,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; const struct mlxsw_sp_sb_cm *cm; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; @@ -1526,7 +1526,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_sr_occ_query_cb_ctx { u8 masked_count; - u8 local_port_1; + u16 local_port_1; }; static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, @@ -1536,7 +1536,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; u8 masked_count; - u8 local_port; + u16 local_port; int rec_index = 0; struct mlxsw_sp_sb_cm *cm; int i; @@ -1582,13 +1582,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u16 local_port, local_port_1, last_local_port; struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; + u8 masked_count, current_page = 0; unsigned long cb_priv = 0; LIST_HEAD(bulk_list); char *sbsr_pl; - u8 masked_count; - u8 local_port_1; - u8 local_port; int i; int err; int err2; @@ -1602,6 +1601,10 @@ next_batch: local_port_1 = local_port; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); + mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) @@ -1609,6 +1612,10 @@ next_batch: for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; + if (local_port > last_local_port) { + current_page++; + goto do_query; + } if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, @@ -1651,10 +1658,11 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u16 local_port, last_local_port; LIST_HEAD(bulk_list); - char *sbsr_pl; unsigned int masked_count; - u8 local_port; + u8 current_page = 0; + char *sbsr_pl; int i; int err; int err2; @@ -1667,6 +1675,10 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); + mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) @@ -1674,6 +1686,10 @@ next_batch: for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { 
if (!mlxsw_sp->ports[local_port]) continue; + if (local_port > last_local_port) { + current_page++; + goto do_query; + } if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, @@ -1715,7 +1731,7 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); @@ -1732,7 +1748,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 84d4460f3dcd..20530712eadb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1491,7 +1491,7 @@ static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width, static void mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, bool autoneg) + u16 local_port, u32 proto_admin, bool autoneg) { mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); } @@ -1969,7 +1969,7 @@ static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width, static void mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, + u16 local_port, u32 proto_admin, bool autoneg) { mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 004c42274e48..ce80931f0402 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -317,13 +317,13 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid, } int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, - enum mlxsw_sp_flood_type packet_type, u8 local_port, + enum mlxsw_sp_flood_type packet_type, u16 local_port, bool member) { struct mlxsw_sp_fid_family *fid_family = fid->fid_family; const struct mlxsw_sp_fid_ops *ops = fid_family->ops; const struct mlxsw_sp_flood_table *flood_table; - char *sftr_pl; + char *sftr2_pl; int err; if (WARN_ON(!fid_family->flood_tables || !ops->flood_index)) @@ -333,16 +333,16 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, if (!flood_table) return -ESRCH; - sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); - if (!sftr_pl) + sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL); + if (!sftr2_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index, - ops->flood_index(fid), flood_table->table_type, 1, - local_port, member); - err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr), - sftr_pl); - kfree(sftr_pl); + mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index, + ops->flood_index(fid), flood_table->table_type, 1, + 
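/*
 * Annotation (not part of the patch): mlxsw_sp_sb_occ_snapshot() and
 * mlxsw_sp_sb_occ_max_clear() above now walk ports in register pages: one
 * SBSR query covers at most MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE ports, so once
 * the walk passes last_local_port the current query is flushed and a new one
 * is packed for the next page. The same control flow modeled in plain C;
 * PAGE_PORTS and MAX_PORTS are made-up values and flush_query() is a
 * stand-in for the sbsr write plus the bulk-list wait:
 */
#include <stdio.h>

#define PAGE_PORTS 256	/* stand-in for MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE */
#define MAX_PORTS  700

static void flush_query(int page, int count)
{
	printf("query page %d covering %d ports\n", page, count);
}

int main(void)
{
	int local_port = 0, page = 0;

	while (local_port < MAX_PORTS) {
		int last = page * PAGE_PORTS + PAGE_PORTS - 1;
		int count = 0;

		/* mask ports into the current query until the page ends */
		while (local_port < MAX_PORTS && local_port <= last) {
			count++;
			local_port++;
		}
		flush_query(page, count);
		page++;
	}
	return 0;
}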
local_port, member); + err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2), + sftr2_pl); + kfree(sftr2_pl); return err; } @@ -439,7 +439,7 @@ static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index, } static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index, - u8 local_port, u16 vid, bool valid) + u16 local_port, u16 vid, bool valid) { enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; char svfa_pl[MLXSW_REG_SVFA_LEN]; @@ -573,7 +573,7 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; int err; err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, @@ -601,7 +601,7 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); @@ -784,7 +784,7 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; int err; /* We only need to transition the port to virtual mode since @@ -808,7 +808,7 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index ad3926de88f2..01cf5a6a26bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -568,37 +568,21 @@ static int mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { - char rips_pl[MLXSW_REG_RIPS_LEN]; struct __ip6_tnl_parm parms6; - int err; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, - MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, - &ipip_entry->dip_kvdl_index); - if (err) - return err; parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); - mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index, - &parms6.raddr); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); - if (err) - goto err_rips_write; - - return 0; - -err_rips_write: - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, - ipip_entry->dip_kvdl_index); - return err; + return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr, + &ipip_entry->dip_kvdl_index); } static void mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_entry *ipip_entry) { - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, - ipip_entry->dip_kvdl_index); + struct __ip6_tnl_parm parms6; + + parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr); } static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index 
9eba8fa684ae..d2b57a045aa4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -130,15 +130,25 @@ mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record, struct mlxsw_sp_nve_mc_entry *mc_entry, const union mlxsw_sp_l3addr *addr) { - WARN_ON(1); + u32 kvdl_index; + int err; + + err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp, + &addr->addr6, &kvdl_index); + if (err) + return err; - return -EINVAL; + mc_entry->ipv6_entry.addr6 = addr->addr6; + mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index; + return 0; } static void mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record, const struct mlxsw_sp_nve_mc_entry *mc_entry) { + mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp, + &mc_entry->ipv6_entry.addr6); } static void @@ -787,6 +797,142 @@ static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp, ops->fdb_clear_offload(nve_dev, vni); } +struct mlxsw_sp_nve_ipv6_ht_key { + u8 mac[ETH_ALEN]; + u16 fid_index; +}; + +struct mlxsw_sp_nve_ipv6_ht_node { + struct rhash_head ht_node; + struct list_head list; + struct mlxsw_sp_nve_ipv6_ht_key key; + struct in6_addr addr6; +}; + +static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = { + .key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key), + .key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key), + .head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node), +}; + +int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index) +{ + return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index); +} + +void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6) +{ + mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6); +} + +static struct mlxsw_sp_nve_ipv6_ht_node * +mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index) +{ + struct mlxsw_sp_nve_ipv6_ht_key key = {}; + + ether_addr_copy(key.mac, mac); + key.fid_index = fid_index; + return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key, + mlxsw_sp_nve_ipv6_ht_params); +} + +static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid_index, + const struct in6_addr *addr6) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node; + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + int err; + + ipv6_ht_node = kzalloc(sizeof(*ipv6_ht_node), GFP_KERNEL); + if (!ipv6_ht_node) + return -ENOMEM; + + ether_addr_copy(ipv6_ht_node->key.mac, mac); + ipv6_ht_node->key.fid_index = fid_index; + ipv6_ht_node->addr6 = *addr6; + + err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node, + mlxsw_sp_nve_ipv6_ht_params); + if (err) + goto err_rhashtable_insert; + + list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list); + + return 0; + +err_rhashtable_insert: + kfree(ipv6_ht_node); + return err; +} + +static void +mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + + list_del(&ipv6_ht_node->list); + rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node, + mlxsw_sp_nve_ipv6_ht_params); + kfree(ipv6_ht_node); +} + +int +mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index, + const struct in6_addr *new_addr6) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node; + + ASSERT_RTNL(); + + ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac, + 
fid_index); + if (!ipv6_ht_node) + return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index, + new_addr6); + + mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6); + ipv6_ht_node->addr6 = *new_addr6; + return 0; +} + +void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node; + + ASSERT_RTNL(); + + ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac, + fid_index); + if (WARN_ON(!ipv6_ht_node)) + return; + + mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node); +} + +static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp, + u16 fid_index) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node, *tmp; + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + + list_for_each_entry_safe(ipv6_ht_node, tmp, &nve->ipv6_addr_list, + list) { + if (ipv6_ht_node->key.fid_index != fid_index) + continue; + + mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6); + mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node); + } +} + int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack) @@ -845,6 +991,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid); mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index); + mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index); if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) || mlxsw_sp_fid_vni(fid, &vni))) @@ -981,7 +1128,13 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp) err = rhashtable_init(&nve->mc_list_ht, &mlxsw_sp_nve_mc_list_ht_params); if (err) - goto err_rhashtable_init; + goto err_mc_rhashtable_init; + + err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params); + if (err) + goto err_ipv6_rhashtable_init; + + INIT_LIST_HEAD(&nve->ipv6_addr_list); err = mlxsw_sp_nve_qos_init(mlxsw_sp); if (err) @@ -1000,8 +1153,10 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp) err_nve_resources_query: err_nve_ecn_init: err_nve_qos_init: + rhashtable_destroy(&nve->ipv6_ht); +err_ipv6_rhashtable_init: rhashtable_destroy(&nve->mc_list_ht); -err_rhashtable_init: +err_mc_rhashtable_init: mlxsw_sp->nve = NULL; kfree(nve); return err; @@ -1010,6 +1165,8 @@ err_rhashtable_init: void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp) { WARN_ON(mlxsw_sp->nve->num_nve_tunnels); + WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list)); + rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht); rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht); kfree(mlxsw_sp->nve); mlxsw_sp->nve = NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 98d1fdc25eac..0d21de1d0395 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -23,6 +23,8 @@ struct mlxsw_sp_nve_config { struct mlxsw_sp_nve { struct mlxsw_sp_nve_config config; struct rhashtable mc_list_ht; + struct rhashtable ipv6_ht; + struct list_head ipv6_addr_list; /* Saves hash table nodes. 
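/*
 * Annotation (not part of the patch): the NVE code above keeps each
 * {MAC, FID} -> IPv6 mapping on two containers at once, an rhashtable for
 * fast lookup by composite key and a plain list (ipv6_addr_list) so that
 * mlxsw_sp_nve_ipv6_addr_flush_by_fid() can drop every node of one FID
 * without an rhashtable walker. A userspace model of the flush side, with
 * the list doubling as the only container (all names illustrative):
 */
#include <stdlib.h>

struct map_node {
	unsigned char mac[6];
	unsigned short fid;		/* part of the composite hash key */
	struct map_node *list_next;	/* secondary index walked by flush */
};

static struct map_node *all_nodes;	/* stand-in for nve->ipv6_addr_list */

/* Remove every mapping that belongs to @fid, as the flush helper does. */
static void flush_by_fid(unsigned short fid)
{
	struct map_node **prev = &all_nodes, *node;

	while ((node = *prev)) {
		if (node->fid != fid) {
			prev = &node->list_next;
			continue;
		}
		*prev = node->list_next;	/* real code also leaves the hash */
		free(node);
	}
}

int main(void)
{
	struct map_node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	n->fid = 10;
	n->list_next = all_nodes;
	all_nodes = n;
	flush_by_fid(10);		/* unlinks and frees the node */
	return all_nodes != NULL;	/* 0: list is empty again */
}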
*/ struct mlxsw_sp *mlxsw_sp; const struct mlxsw_sp_nve_ops **nve_ops_arr; unsigned int num_nve_tunnels; /* Protected by RTNL */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index d018d2da5949..d309b77a0194 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -10,8 +10,48 @@ #include "spectrum.h" #include "spectrum_nve.h" -#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ +#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_LEARN) +#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \ + VXLAN_F_UDP_ZERO_CSUM6_TX | \ + VXLAN_F_UDP_ZERO_CSUM6_RX) + +static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg, + struct netlink_ext_ack *extack) +{ + if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX"); + return false; + } + + if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); + return false; + } + + return true; +} + +static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg, + struct netlink_ext_ack *extack) +{ + if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX"); + return false; + } + + if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX"); + return false; + } + + if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); + return false; + } + + return true; +} static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_params *params, @@ -20,11 +60,6 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, struct vxlan_dev *vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; - if (cfg->saddr.sa.sa_family != AF_INET) { - NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported"); - return false; - } - if (vxlan_addr_multicast(&cfg->remote_ip)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported"); return false; @@ -55,14 +90,15 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, return false; } - if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) { - NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported"); - return false; - } - - if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) { - NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); - return false; + switch (cfg->saddr.sa.sa_family) { + case AF_INET: + if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack)) + return false; + break; + case AF_INET6: + if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack)) + return false; + break; } if (cfg->ttl == 0) { @@ -90,6 +126,22 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack); } +static void +mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg, + struct mlxsw_sp_nve_config *config) +{ + switch (cfg->saddr.sa.sa_family) { + case AF_INET: + config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; + config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; + break; + case AF_INET6: + config->ul_proto = MLXSW_SP_L3_PROTO_IPV6; + 
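/*
 * Annotation (not part of the patch): the two new VxLAN flag checks follow
 * the usual "required bits must be set, anything outside a supported mask is
 * rejected" idiom, once per address family. The generic shape, with made-up
 * flag values:
 */
#include <stdbool.h>
#include <stdio.h>

#define F_ZERO_CSUM_TX	0x1
#define F_ZERO_CSUM_RX	0x2
#define F_LEARN		0x4

#define SUPPORTED_FLAGS (F_ZERO_CSUM_TX | F_ZERO_CSUM_RX | F_LEARN)

static bool flags_check(unsigned int flags)
{
	if (!(flags & F_ZERO_CSUM_TX))
		return false;		/* a required bit is missing */
	if (flags & ~SUPPORTED_FLAGS)
		return false;		/* an unsupported bit is set */
	return true;
}

int main(void)
{
	printf("%d %d\n", flags_check(F_ZERO_CSUM_TX | F_LEARN),
	       flags_check(F_ZERO_CSUM_TX | 0x80));	/* prints "1 0" */
	return 0;
}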
config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr; + break; + } +} + static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_params *params, struct mlxsw_sp_nve_config *config) @@ -102,8 +154,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->flowlabel = cfg->label; config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0; config->ul_tb_id = RT_TABLE_MAIN; - config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; - config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; + mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config); config->udp_dport = cfg->dst_port; } @@ -111,6 +162,7 @@ static void mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, const struct mlxsw_sp_nve_config *config) { + struct in6_addr addr6; u8 udp_sport; mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, @@ -122,7 +174,18 @@ mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, get_random_bytes(&udp_sport, sizeof(udp_sport)); udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80; mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport); - mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4)); + + switch (config->ul_proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_tngcr_usipv4_set(tngcr_pl, + be32_to_cpu(config->ul_sip.addr4)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + addr6 = config->ul_sip.addr6; + mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl, + (const char *)&addr6); + break; + } } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index 1a180384e7e8..0ff163fbc775 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -36,7 +36,7 @@ struct mlxsw_sp_ptp_state { }; struct mlxsw_sp1_ptp_key { - u8 local_port; + u16 local_port; u8 message_type; u16 sequence_id; u8 domain_number; @@ -406,7 +406,7 @@ mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp, * This case is similar to 2) above. 
*/ static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port, + struct sk_buff *skb, u16 local_port, bool ingress, struct skb_shared_hwtstamps *hwtstamps) { @@ -524,7 +524,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp, } static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port, + struct sk_buff *skb, u16 local_port, bool ingress) { struct mlxsw_sp_port *mlxsw_sp_port; @@ -564,7 +564,7 @@ immediate: } void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, - u8 local_port, u8 message_type, + u16 local_port, u8 message_type, u8 domain_number, u16 sequence_id, u64 timestamp) { @@ -599,14 +599,14 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, } void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port) + u16 local_port) { skb_reset_mac_header(skb); mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true); } void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index 1d43a3755285..c06cd1384bca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -31,13 +31,13 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state); void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port); + struct sk_buff *skb, u16 local_port); void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, - u8 local_port, u8 message_type, + u16 local_port, u8 message_type, u8 domain_number, u16 sequence_id, u64 timestamp); @@ -80,20 +80,20 @@ static inline void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state) } static inline void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp); } static inline void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { dev_kfree_skb_any(skb); } static inline void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, - u8 local_port, u8 message_type, + u16 local_port, u8 message_type, u8 domain_number, u16 sequence_id, u64 timestamp) { @@ -159,13 +159,13 @@ static inline void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state) } static inline void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp); } static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 98df6e8fa45f..7257a04660d7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1307,6 
+1307,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id, addr_prefix_len = 32; break; case MLXSW_SP_L3_PROTO_IPV6: + addrp = &addr->addr6; + addr_len = 16; + addr_prefix_len = 128; + break; default: WARN_ON(1); return NULL; @@ -7002,6 +7006,8 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi; union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr }; + u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id); + struct mlxsw_sp_router *router = mlxsw_sp->router; int ifindex = nhgi->nexthops[0].ifindex; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -7015,6 +7021,14 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry, ipip_entry); } + if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id, + MLXSW_SP_L3_PROTO_IPV6, &dip)) { + u32 tunnel_index; + + tunnel_index = router->nve_decap_config.tunnel_index; + fib_entry->decap.tunnel_index = tunnel_index; + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; + } return 0; } @@ -9340,7 +9354,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); } -u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) +u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) { return mlxsw_core_max_ports(mlxsw_sp->core) + 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index f5f819aa9a65..f9671cc53002 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -37,7 +37,7 @@ struct mlxsw_sp_span { struct mlxsw_sp_span_analyzed_port { struct list_head list; /* Member of analyzed_ports_list */ refcount_t ref_count; - u8 local_port; + u16 local_port; bool ingress; }; @@ -46,7 +46,7 @@ struct mlxsw_sp_span_trigger_entry { struct mlxsw_sp_span *span; const struct mlxsw_sp_span_trigger_ops *ops; refcount_t ref_count; - u8 local_port; + u16 local_port; enum mlxsw_sp_span_trigger trigger; struct mlxsw_sp_span_trigger_parms parms; }; @@ -179,7 +179,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -199,7 +199,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -480,7 +480,7 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -584,7 +584,7 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = 
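/*
 * Annotation (not part of the patch): the ip2me hunk above fills in the
 * IPv6 arm of the per-protocol dispatch: the same lookup as IPv4, but keyed
 * on 16 address bytes with a /128 host prefix instead of 4 bytes with /32.
 * A sketch of that dispatch over a union of address types (names are
 * illustrative, not the driver's):
 */
#include <stddef.h>

union l3addr {
	unsigned int addr4;		/* IPv4, network byte order */
	unsigned char addr6[16];	/* IPv6 */
};

enum l3proto { PROTO_IPV4, PROTO_IPV6 };

static int addr_params(enum l3proto proto, const union l3addr *addr,
		       const void **addrp, size_t *addr_len,
		       unsigned int *prefix_len)
{
	switch (proto) {
	case PROTO_IPV4:
		*addrp = &addr->addr4;
		*addr_len = 4;		/* bytes */
		*prefix_len = 32;	/* bits */
		return 0;
	case PROTO_IPV6:		/* the newly handled case */
		*addrp = &addr->addr6;
		*addr_len = 16;
		*prefix_len = 128;
		return 0;
	}
	return -1;			/* unknown protocol */
}

int main(void)
{
	union l3addr a = { .addr4 = 0x0100000a };
	const void *p;
	size_t len;
	unsigned int plen;

	return addr_params(PROTO_IPV6, &a, &p, &len, &plen);
}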
span_entry->id; @@ -650,7 +650,7 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -997,7 +997,7 @@ static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_por } static struct mlxsw_sp_span_analyzed_port * -mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port, +mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port, bool ingress) { struct mlxsw_sp_span_analyzed_port *analyzed_port; @@ -1165,7 +1165,7 @@ int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_span_analyzed_port *analyzed_port; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; int err = 0; mutex_lock(&mlxsw_sp->span->analyzed_ports_lock); @@ -1193,7 +1193,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_span_analyzed_port *analyzed_port; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; mutex_lock(&mlxsw_sp->span->analyzed_ports_lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 81c7e8a7fcf5..65c1724c63b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -865,17 +865,17 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, bool add) { - char *smid_pl; + char *smid2_pl; int err; - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) + smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL); + if (!smid2_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid_idx, - mlxsw_sp_router_port(mlxsw_sp), add); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); + mlxsw_reg_smid2_pack(smid2_pl, mid_idx, + mlxsw_sp_router_port(mlxsw_sp), add); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl); + kfree(smid2_pl); return err; } @@ -980,7 +980,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_bridge_device *bridge_device; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u16 vid = mlxsw_sp_port_vlan->vid; struct mlxsw_sp_fid *fid; int err; @@ -1029,7 +1029,7 @@ mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u16 vid = mlxsw_sp_port_vlan->vid; mlxsw_sp_port_vlan->fid = NULL; @@ -1290,38 +1290,52 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, - const char *mac, u16 fid, - enum mlxsw_sp_l3proto proto, - const union mlxsw_sp_l3addr *addr, - bool adding, bool dynamic) +static int 
+mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic, + const char *mac, u16 fid, __be32 addr, bool adding) { - enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto; char *sfd_pl; u8 num_rec; u32 uip; int err; - switch (proto) { - case MLXSW_SP_L3_PROTO_IPV4: - uip = be32_to_cpu(addr->addr4); - sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4; - break; - case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON(1); - return -EOPNOTSUPP; - } + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + uip = be32_to_cpu(addr); + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0, + mlxsw_sp_sfd_rec_policy(dynamic), mac, + fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: + kfree(sfd_pl); + return err; +} + +static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + u32 kvdl_index, bool adding) +{ + char *sfd_pl; + u8 num_rec; + int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); if (!sfd_pl) return -ENOMEM; mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); - mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0, - mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, - MLXSW_REG_SFD_REC_ACTION_NOP, uip, - sfd_proto); + mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index); num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1335,7 +1349,80 @@ out: return err; } -static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + const struct in6_addr *addr) +{ + u32 kvdl_index; + int err; + + err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index); + if (err) + return err; + + err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, + kvdl_index, true); + if (err) + goto err_sfd_write; + + err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr); + if (err) + /* Replace can fail only for creating new mapping, so removing + * the FDB entry in the error path is OK. 
+ */ + goto err_addr_replace; + + return 0; + +err_addr_replace: + mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index, + false); +err_sfd_write: + mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr); + return err; +} + +static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + const struct in6_addr *addr) +{ + mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid); + mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false); + mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr); +} + +static int +mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid, const struct in6_addr *addr, bool adding) +{ + if (adding) + return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid, + addr); + + mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr); + return 0; +} + +static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + enum mlxsw_sp_l3proto proto, + const union mlxsw_sp_l3addr *addr, + bool adding, bool dynamic) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid, + addr->addr4, adding); + case MLXSW_SP_L3_PROTO_IPV6: + return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid, + &addr->addr6, adding); + default: + WARN_ON(1); + return -EOPNOTSUPP; + } +} + +static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port, const char *mac, u16 fid, bool adding, enum mlxsw_reg_sfd_rec_action action, enum mlxsw_reg_sfd_rec_policy policy) @@ -1363,7 +1450,7 @@ out: return err; } -static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port, const char *mac, u16 fid, bool adding, bool dynamic) { @@ -1477,30 +1564,30 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, long *ports_bitmap, bool set_router_port) { - char *smid_pl; + char *smid2_pl; int err, i; - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) + smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL); + if (!smid2_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); + mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false); for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { if (mlxsw_sp->ports[i]) - mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1); } - mlxsw_reg_smid_port_mask_set(smid_pl, - mlxsw_sp_router_port(mlxsw_sp), 1); + mlxsw_reg_smid2_port_mask_set(smid2_pl, + mlxsw_sp_router_port(mlxsw_sp), 1); for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) - mlxsw_reg_smid_port_set(smid_pl, i, 1); + mlxsw_reg_smid2_port_set(smid2_pl, i, 1); - mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp), - set_router_port); + mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp), + set_router_port); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl); + kfree(smid2_pl); return err; } @@ -1508,16 +1595,16 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid_idx, bool add) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *smid_pl; + char *smid2_pl; int err; - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) + smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL); + if (!smid2_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid_idx, 
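/*
 * Annotation (not part of the patch): mlxsw_sp_port_fdb_tun_uc_op6_add()
 * above is a textbook three-step constructor with reverse-order unwinding:
 * take the KVDL reference, write the SFD record, then record the MAC/FID to
 * address mapping; each error label undoes exactly the steps that already
 * completed, which is why removing the FDB entry in the error path is safe,
 * as the in-code comment notes. The skeleton, with placeholder steps:
 */
#include <stdio.h>

static int step_a(void) { return 0; }	/* e.g. take the KVDL reference */
static int step_b(void) { return 0; }	/* e.g. write the FDB record */
static int step_c(void) { return -1; }	/* e.g. record the mapping (fails) */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_b;
	err = step_c();
	if (err)
		goto err_c;
	return 0;

err_c:
	undo_b();	/* reverse order: last completed step undone first */
err_b:
	undo_a();
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}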
mlxsw_sp_port->local_port, add); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); + mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl); + kfree(smid2_pl); return err; } @@ -2536,7 +2623,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port *mlxsw_sp_port; enum switchdev_notifier_type type; char mac[ETH_ALEN]; - u8 local_port; + u16 local_port; u16 vid, fid; bool do_notification = true; int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 26d01adbedad..47b061b99160 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -60,7 +60,7 @@ enum { }; static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port, + u16 local_port, struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_port_pcpu_stats *pcpu_stats; @@ -85,7 +85,7 @@ static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, return 0; } -static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct devlink_port *in_devlink_port; @@ -109,7 +109,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, consume_skb(skb); } -static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index; @@ -138,7 +138,7 @@ static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port, consume_skb(skb); } -static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, +static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct devlink_port *in_devlink_port; @@ -164,7 +164,7 @@ static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, return 0; } -static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { int err; @@ -176,14 +176,14 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, netif_receive_skb(skb); } -static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { skb->offload_fwd_mark = 1; mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); } -static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { skb->offload_l3_fwd_mark = 1; @@ -191,7 +191,7 @@ static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port, mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); } -static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); @@ -212,7 +212,7 @@ static struct mlxsw_sp_port * mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_rx_md_info *rx_md_info) { - u8 local_port; + u16 local_port; if (!rx_md_info->tx_port_valid) 
return NULL; @@ -257,7 +257,7 @@ static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp, md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT; } -static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); @@ -293,7 +293,7 @@ out: consume_skb(skb); } -static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info; @@ -343,7 +343,7 @@ out: consume_skb(skb); } -static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 9380e396f648..8b7a8d879083 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1305,12 +1305,6 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) { - netif_warn(adapter, drv, adapter->netdev, - "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n", - config.flags); - } - switch (config.tx_type) { case HWTSTAMP_TX_OFF: for (index = 0; index < LAN743X_MAX_TX_CHANNELS; diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig index 83ac4266b417..2860a8c9923d 100644 --- a/drivers/net/ethernet/microchip/lan966x/Kconfig +++ b/drivers/net/ethernet/microchip/lan966x/Kconfig @@ -3,5 +3,6 @@ config LAN966X_SWITCH depends on HAS_IOMEM depends on OF select PHYLINK + select PACKING help This driver supports the Lan966x network switch device. 
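
Editor's note, not part of the patch set: the mlxsw hunks above consistently widen local_port from u8 to u16 across the switchdev and trap paths, presumably so that devices exposing more than 255 local ports can be addressed without silent truncation. A minimal standalone sketch of the failure mode the wider type avoids (plain C, not driver code; the port number 300 is made up for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hw_port = 300;    /* hypothetical port on a large switch */
	uint8_t as_u8 = hw_port;   /* old u8 representation: wraps to 44 */
	uint16_t as_u16 = hw_port; /* widened representation: preserved */

	printf("u8: %u, u16: %u\n", as_u8, as_u16);
	assert(as_u8 == 44 && as_u16 == 300);
	return 0;
}
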
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index e9e4dca6542d..101c1f005baf 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -83,10 +83,10 @@ static int lan966x_create_targets(struct platform_device *pdev, begin[idx] = devm_ioremap(&pdev->dev, iores[idx]->start, resource_size(iores[idx])); - if (IS_ERR(begin[idx])) { + if (!begin[idx]) { dev_err(&pdev->dev, "Unable to get registers: %s\n", iores[idx]->name); - return PTR_ERR(begin[idx]); + return -ENOMEM; } } @@ -512,11 +512,6 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args) *buf = val; } - if (sz < 0) { - err = sz; - break; - } - skb->protocol = eth_type_trans(skb, dev); netif_rx_ni(skb); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 2ddb20585d40..237555845a52 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -331,7 +331,6 @@ int lan966x_port_pcs_set(struct lan966x_port *port, struct lan966x *lan966x = port->lan966x; bool inband_aneg = false; bool outband; - int err; if (config->inband) { if (config->portmode == PHY_INTERFACE_MODE_SGMII || @@ -341,11 +340,6 @@ int lan966x_port_pcs_set(struct lan966x_port *port, config->autoneg) inband_aneg = true; /* Clause-37 in-band-aneg */ - if (config->speed > 0) { - err = phy_set_speed(port->serdes, config->speed); - if (err) - return err; - } outband = false; } else { outband = true; diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 34b971ff8ef8..078d6a5a0768 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc, if (err) goto out; - err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, - &hwc_wq->msg_buf); - if (err) - goto out; - hwc_wq->hwc = hwc; hwc_wq->gdma_wq = queue; hwc_wq->queue_depth = q_depth; hwc_wq->hwc_cq = hwc_cq; + err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, + &hwc_wq->msg_buf); + if (err) + goto out; + *hwc_wq_ptr = hwc_wq; return 0; out: diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 722c27694b21..41b34a509308 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -7,9 +7,11 @@ mscc_ocelot_switch_lib-y := \ ocelot_vcap.o \ ocelot_flower.o \ ocelot_ptp.o \ - ocelot_devlink.o + ocelot_devlink.o \ + vsc7514_regs.o mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o mscc_ocelot-y := \ + ocelot_fdma.o \ ocelot_vsc7514.o \ ocelot_net.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index b1856d8c944b..9b42187a026a 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1054,14 +1054,34 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) return 0; } -int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) +void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb, + u64 timestamp) { struct skb_shared_hwtstamps *shhwtstamps; u64 tod_in_ns, full_ts_in_ns; + struct timespec64 ts; + + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + + tod_in_ns = 
ktime_set(ts.tv_sec, ts.tv_nsec); + if ((tod_in_ns & 0xffffffff) < timestamp) + full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | + timestamp; + else + full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | + timestamp; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = full_ts_in_ns; +} +EXPORT_SYMBOL(ocelot_ptp_rx_timestamp); + +int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) +{ u64 timestamp, src_port, len; u32 xfh[OCELOT_TAG_LEN / 4]; struct net_device *dev; - struct timespec64 ts; struct sk_buff *skb; int sz, buf_len; u32 val, *buf; @@ -1117,21 +1137,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) *buf = val; } - if (ocelot->ptp) { - ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); - - tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); - if ((tod_in_ns & 0xffffffff) < timestamp) - full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | - timestamp; - else - full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | - timestamp; - - shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamps->hwtstamp = full_ts_in_ns; - } + if (ocelot->ptp) + ocelot_ptp_rx_timestamp(ocelot, skb, timestamp); /* Everything we see on an interface that is in the HW bridge * has already been forwarded. @@ -1164,6 +1171,18 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp) } EXPORT_SYMBOL(ocelot_can_inject); +void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag) +{ + ocelot_ifh_set_bypass(ifh, 1); + ocelot_ifh_set_dest(ifh, BIT_ULL(port)); + ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); + if (vlan_tag) + ocelot_ifh_set_vlan_tci(ifh, vlan_tag); + if (rew_op) + ocelot_ifh_set_rew_op(ifh, rew_op); +} +EXPORT_SYMBOL(ocelot_ifh_port_set); + void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, u32 rew_op, struct sk_buff *skb) { @@ -1173,11 +1192,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); - ocelot_ifh_set_bypass(ifh, 1); - ocelot_ifh_set_dest(ifh, BIT_ULL(port)); - ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); - ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb)); - ocelot_ifh_set_rew_op(ifh, rew_op); + ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); for (i = 0; i < OCELOT_TAG_LEN / 4; i++) ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); @@ -1602,10 +1617,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - /* Tx type sanity check */ switch (cfg.tx_type) { case HWTSTAMP_TX_ON: diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 1eb0b5ad51e9..bf4eff6d7086 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -32,6 +32,8 @@ #define OCELOT_PTP_QUEUE_SZ 128 +#define OCELOT_JUMBO_MTU 9000 + struct ocelot_port_tc { bool block_shared; unsigned long offload_cnt; diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c new file mode 100644 index 000000000000..dffa597bffe6 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_fdma.c @@ -0,0 +1,894 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi SoCs FDMA driver + * + * Copyright (c) 2021 Microchip + * + * Page recycling code 
is mostly taken from gianfar driver. + */ + +#include <linux/align.h> +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dsa/ocelot.h> +#include <linux/netdevice.h> +#include <linux/of_platform.h> +#include <linux/skbuff.h> + +#include "ocelot_fdma.h" +#include "ocelot_qs.h" + +DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled); + +static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data) +{ + regmap_write(ocelot->targets[FDMA], reg, data); +} + +static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg) +{ + u32 retval; + + regmap_read(ocelot->targets[FDMA], reg, &retval); + + return retval; +} + +static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx) +{ + return base + idx * sizeof(struct ocelot_fdma_dcb); +} + +static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma) +{ + return (dma - base) / sizeof(struct ocelot_fdma_dcb); +} + +static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz) +{ + return unlikely(idx == ring_sz - 1) ? 0 : idx + 1; +} + +static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz) +{ + return unlikely(idx == 0) ? ring_sz - 1 : idx - 1; +} + +static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring; + + if (rx_ring->next_to_use >= rx_ring->next_to_clean) + return OCELOT_FDMA_RX_RING_SIZE - + (rx_ring->next_to_use - rx_ring->next_to_clean) - 1; + else + return rx_ring->next_to_clean - rx_ring->next_to_use - 1; +} + +static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + + if (tx_ring->next_to_use >= tx_ring->next_to_clean) + return OCELOT_FDMA_TX_RING_SIZE - + (tx_ring->next_to_use - tx_ring->next_to_clean) - 1; + else + return tx_ring->next_to_clean - tx_ring->next_to_use - 1; +} + +static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + + return tx_ring->next_to_clean == tx_ring->next_to_use; +} + +static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma, + int chan) +{ + ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma); + /* Barrier to force memory writes to DCB to be completed before starting + * the channel. 
+ */ + wmb(); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan)); +} + +static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan) +{ + unsigned long timeout; + u32 safe; + + timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US); + do { + safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE); + if (safe & BIT(chan)) + return 0; + } while (time_after(jiffies, timeout)); + + return -ETIMEDOUT; +} + +static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb, + dma_addr_t dma_addr, + size_t size) +{ + u32 offset = dma_addr & 0x3; + + dcb->llp = 0; + dcb->datap = ALIGN_DOWN(dma_addr, 4); + dcb->datal = ALIGN_DOWN(size, 4); + dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset); +} + +static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot, + struct ocelot_fdma_rx_buf *rxb) +{ + dma_addr_t mapping; + struct page *page; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ocelot->dev, mapping))) { + __free_page(page); + return false; + } + + rxb->page = page; + rxb->page_offset = 0; + rxb->dma_addr = mapping; + + return true; +} + +static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + struct ocelot_fdma_dcb *dcb; + dma_addr_t dma_addr; + int ret = 0; + u16 idx; + + rx_ring = &fdma->rx_ring; + idx = rx_ring->next_to_use; + + while (alloc_cnt--) { + rxb = &rx_ring->bufs[idx]; + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) { + dev_err_ratelimited(ocelot->dev, + "Failed to allocate rx\n"); + ret = -ENOMEM; + break; + } + } + + dcb = &rx_ring->dcbs[idx]; + dma_addr = rxb->dma_addr + rxb->page_offset; + ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE); + + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + /* Chain the DCB to the next one */ + dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx); + } + + rx_ring->next_to_use = idx; + rx_ring->next_to_alloc = idx; + + return ret; +} + +static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot, + struct ocelot_fdma_tx_buf *tx_buf, + struct ocelot_fdma_dcb *dcb, + struct sk_buff *skb) +{ + dma_addr_t mapping; + + mapping = dma_map_single(ocelot->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(ocelot->dev, mapping))) + return false; + + dma_unmap_addr_set(tx_buf, dma_addr, mapping); + + ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE); + tx_buf->skb = skb; + dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len); + dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF; + + return true; +} + +static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot) +{ + u32 llp; + + /* Check if the FDMA hits the DCB with LLP == NULL */ + llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN)); + if (unlikely(llp)) + return false; + + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE, + BIT(MSCC_FDMA_XTR_CHAN)); + + return true; +} + +static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring) +{ + struct ocelot_fdma_dcb *dcb; + unsigned int idx; + + idx = ocelot_fdma_idx_prev(rx_ring->next_to_use, + OCELOT_FDMA_RX_RING_SIZE); + dcb = &rx_ring->dcbs[idx]; + dcb->llp = 0; +} + +static void ocelot_fdma_rx_restart(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring 
*rx_ring; + const u8 chan = MSCC_FDMA_XTR_CHAN; + dma_addr_t new_llp, dma_base; + unsigned int idx; + u32 llp_prev; + int ret; + + rx_ring = &fdma->rx_ring; + ret = ocelot_fdma_wait_chan_safe(ocelot, chan); + if (ret) { + dev_err_ratelimited(ocelot->dev, + "Unable to stop RX channel\n"); + return; + } + + ocelot_fdma_rx_set_llp(rx_ring); + + /* FDMA stopped on the last DCB that contained a NULL LLP, since + * we processed some DCBs in RX, there is free space, and we must set + * DCB_LLP to point to the next DCB + */ + llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan)); + dma_base = rx_ring->dcbs_dma; + + /* Get the next DMA addr located after LLP == NULL DCB */ + idx = ocelot_fdma_dma_idx(dma_base, llp_prev); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + new_llp = ocelot_fdma_idx_dma(dma_base, idx); + + /* Finally reactivate the channel */ + ocelot_fdma_activate_chan(ocelot, new_llp, chan); +} + +static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat, + struct sk_buff *skb, bool first) +{ + int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat); + struct page *page = rxb->page; + + if (likely(first)) { + skb_put(skb, size); + } else { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset, size, OCELOT_FDMA_RX_SIZE); + } + + /* Try to reuse page */ + if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page))) + return false; + + /* Change offset to the other half */ + rxb->page_offset ^= OCELOT_FDMA_RX_SIZE; + + page_ref_inc(page); + + return true; +} + +static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot, + struct ocelot_fdma_rx_buf *old_rxb) +{ + struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring; + struct ocelot_fdma_rx_buf *new_rxb; + + new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc]; + rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc, + OCELOT_FDMA_RX_RING_SIZE); + + /* Copy page reference */ + *new_rxb = *old_rxb; + + /* Sync for use by the device */ + dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr, + old_rxb->page_offset, + OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat, + struct ocelot_fdma_rx_buf *rxb, + struct sk_buff *skb) +{ + bool first = false; + + /* Allocate skb head and data */ + if (likely(!skb)) { + void *buff_addr = page_address(rxb->page) + + rxb->page_offset; + + skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE); + if (unlikely(!skb)) { + dev_err_ratelimited(ocelot->dev, + "build_skb failed !\n"); + return NULL; + } + first = true; + } + + dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr, + rxb->page_offset, OCELOT_FDMA_RX_SIZE, + DMA_FROM_DEVICE); + + if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) { + /* Reuse the free half of the page for the next_to_alloc DCB*/ + ocelot_fdma_reuse_rx_page(ocelot, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear rx buff content */ + rxb->page = NULL; + + return skb; +} + +static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb) +{ + struct net_device *ndev; + void *xfh = skb->data; + u64 timestamp; + u64 src_port; + + skb_pull(skb, OCELOT_TAG_LEN); + + ocelot_xfh_get_src_port(xfh, &src_port); + if (unlikely(src_port >= ocelot->num_phys_ports)) + return false; + + ndev = ocelot_port_to_netdev(ocelot, src_port); + if (unlikely(!ndev)) + return false; + + pskb_trim(skb, skb->len - ETH_FCS_LEN); + + 
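/* Editor's aside, not part of the patch: frames extracted through the
 * FDMA arrive with the extraction frame header (XFH) prepended and the
 * FCS still attached. ocelot_fdma_receive_skb() therefore pulls
 * OCELOT_TAG_LEN bytes of header, resolves the source port to its
 * netdev, trims ETH_FCS_LEN from the tail (above), and below recovers
 * the partial RX timestamp from the XFH REW_VAL field via the
 * ocelot_ptp_rx_timestamp() helper factored out earlier in this diff.
 */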
skb->dev = ndev; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + if (ocelot->ptp) { + ocelot_xfh_get_rew_val(xfh, &timestamp); + ocelot_ptp_rx_timestamp(ocelot, skb, timestamp); + } + + if (likely(!skb_defer_rx_timestamp(skb))) + netif_receive_skb(skb); + + return true; +} + +static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + struct ocelot_fdma_dcb *dcb; + struct sk_buff *skb; + int work_done = 0; + int cleaned_cnt; + u32 stat; + u16 idx; + + cleaned_cnt = ocelot_fdma_rx_ring_free(fdma); + rx_ring = &fdma->rx_ring; + skb = rx_ring->skb; + + while (budget--) { + idx = rx_ring->next_to_clean; + dcb = &rx_ring->dcbs[idx]; + stat = dcb->stat; + if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0) + break; + + /* New packet is a start of frame but we already got a skb set, + * we probably lost an EOF packet, free skb + */ + if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) { + dev_kfree_skb(skb); + skb = NULL; + } + + rxb = &rx_ring->bufs[idx]; + /* Fetch next to clean buffer from the rx_ring */ + skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb); + if (unlikely(!skb)) + break; + + work_done++; + cleaned_cnt++; + + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + rx_ring->next_to_clean = idx; + + if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT || + stat & MSCC_FDMA_DCB_STAT_PD)) { + dev_err_ratelimited(ocelot->dev, + "DCB aborted or pruned\n"); + dev_kfree_skb(skb); + skb = NULL; + continue; + } + + /* We still need to process the other fragment of the packet + * before delivering it to the network stack + */ + if (!(stat & MSCC_FDMA_DCB_STAT_EOF)) + continue; + + if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb))) + dev_kfree_skb(skb); + + skb = NULL; + } + + rx_ring->skb = skb; + + if (cleaned_cnt) + ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt); + + return work_done; +} + +static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot) +{ + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; + struct net_device *dev; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_port = ocelot->ports[port]; + if (!ocelot_port) + continue; + priv = container_of(ocelot_port, struct ocelot_port_private, + port); + dev = priv->dev; + + if (unlikely(netif_queue_stopped(dev))) + netif_wake_queue(dev); + } +} + +static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_tx_ring *tx_ring; + struct ocelot_fdma_tx_buf *buf; + unsigned int new_null_llp_idx; + struct ocelot_fdma_dcb *dcb; + bool end_of_list = false; + struct sk_buff *skb; + dma_addr_t dma; + u32 dcb_llp; + u16 ntc; + int ret; + + tx_ring = &fdma->tx_ring; + + /* Purge the TX packets that have been sent up to the NULL llp or the + * end of done list. 
+ */ + while (!ocelot_fdma_tx_ring_empty(fdma)) { + ntc = tx_ring->next_to_clean; + dcb = &tx_ring->dcbs[ntc]; + if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD)) + break; + + buf = &tx_ring->bufs[ntc]; + skb = buf->skb; + dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr), + skb->len, DMA_TO_DEVICE); + napi_consume_skb(skb, budget); + dcb_llp = dcb->llp; + + /* Only update after accessing all dcb fields */ + tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc, + OCELOT_FDMA_TX_RING_SIZE); + + /* If we hit the NULL LLP, stop, we might need to reload FDMA */ + if (dcb_llp == 0) { + end_of_list = true; + break; + } + } + + /* No need to try to wake if there were no TX cleaned_cnt up. */ + if (ocelot_fdma_tx_ring_free(fdma)) + ocelot_fdma_wakeup_netdev(ocelot); + + /* If there is still some DCBs to be processed by the FDMA or if the + * pending list is empty, there is no need to restart the FDMA. + */ + if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma)) + return; + + ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN); + if (ret) { + dev_warn(ocelot->dev, + "Failed to wait for TX channel to stop\n"); + return; + } + + /* Set NULL LLP to be the last DCB used */ + new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use, + OCELOT_FDMA_TX_RING_SIZE); + dcb = &tx_ring->dcbs[new_null_llp_idx]; + dcb->llp = 0; + + dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean); + ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN); +} + +static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget) +{ + struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi); + struct ocelot *ocelot = fdma->ocelot; + int work_done = 0; + bool rx_stopped; + + ocelot_fdma_tx_cleanup(ocelot, budget); + + rx_stopped = ocelot_fdma_check_stop_rx(ocelot); + + work_done = ocelot_fdma_rx_get(ocelot, budget); + + if (rx_stopped) + ocelot_fdma_rx_restart(ocelot); + + if (work_done < budget) { + napi_complete_done(&fdma->napi, work_done); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | + BIT(MSCC_FDMA_XTR_CHAN)); + } + + return work_done; +} + +static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id) +{ + u32 ident, llp, frm, err, err_code; + struct ocelot *ocelot = dev_id; + + ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT); + frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM); + llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident); + if (frm || llp) { + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + napi_schedule(&ocelot->fdma->napi); + } + + err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR); + if (unlikely(err)) { + err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE); + dev_err_ratelimited(ocelot->dev, + "Error ! 
chans mask: %#x, code: %#x\n", + err, err_code); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err); + ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code); + } + + return IRQ_HANDLED; +} + +static void ocelot_fdma_send_skb(struct ocelot *ocelot, + struct ocelot_fdma *fdma, struct sk_buff *skb) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + struct ocelot_fdma_tx_buf *tx_buf; + struct ocelot_fdma_dcb *dcb; + dma_addr_t dma; + u16 next_idx; + + dcb = &tx_ring->dcbs[tx_ring->next_to_use]; + tx_buf = &tx_ring->bufs[tx_ring->next_to_use]; + if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) { + dev_kfree_skb_any(skb); + return; + } + + next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use, + OCELOT_FDMA_TX_RING_SIZE); + skb_tx_timestamp(skb); + + /* If the FDMA TX chan is empty, then enqueue the DCB directly */ + if (ocelot_fdma_tx_ring_empty(fdma)) { + dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, + tx_ring->next_to_use); + ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN); + } else { + /* Chain the DCBs */ + dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx); + } + + tx_ring->next_to_use = next_idx; +} + +static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev) +{ + int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0); + int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); + void *ifh; + int err; + + if (unlikely(needed_headroom || needed_tailroom || + skb_header_cloned(skb))) { + err = pskb_expand_head(skb, needed_headroom, needed_tailroom, + GFP_ATOMIC); + if (unlikely(err)) { + dev_kfree_skb_any(skb); + return 1; + } + } + + err = skb_linearize(skb); + if (err) { + net_err_ratelimited("%s: skb_linearize error (%d)!\n", + dev->name, err); + dev_kfree_skb_any(skb); + return 1; + } + + ifh = skb_push(skb, OCELOT_TAG_LEN); + skb_put(skb, ETH_FCS_LEN); + memset(ifh, 0, OCELOT_TAG_LEN); + ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); + + return 0; +} + +int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + int ret = NETDEV_TX_OK; + + spin_lock(&fdma->tx_ring.xmit_lock); + + if (ocelot_fdma_tx_ring_free(fdma) == 0) { + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + goto out; + } + + if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev)) + goto out; + + ocelot_fdma_send_skb(ocelot, fdma, skb); + +out: + spin_unlock(&fdma->tx_ring.xmit_lock); + + return ret; +} + +static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + u16 idx; + + rx_ring = &fdma->rx_ring; + idx = rx_ring->next_to_clean; + + /* Free the pages held in the RX ring */ + while (idx != rx_ring->next_to_use) { + rxb = &rx_ring->bufs[idx]; + dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(rxb->page); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + } + + if (fdma->rx_ring.skb) + dev_kfree_skb_any(fdma->rx_ring.skb); +} + +static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_tx_ring *tx_ring; + struct ocelot_fdma_tx_buf *txb; + struct sk_buff *skb; + u16 idx; + + tx_ring = &fdma->tx_ring; + idx = tx_ring->next_to_clean; + + while (idx != tx_ring->next_to_use) { + txb = &tx_ring->bufs[idx]; + skb = 
txb->skb; + dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE); + } +} + +static int ocelot_fdma_rings_alloc(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_dcb *dcbs; + unsigned int adjust; + dma_addr_t dcbs_dma; + int ret; + + /* Create a pool of consistent memory blocks for hardware descriptors */ + fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev, + OCELOT_DCBS_HW_ALLOC_SIZE, + &fdma->dcbs_dma_base, GFP_KERNEL); + if (!fdma->dcbs_base) + return -ENOMEM; + + /* DCBs must be aligned on a 32bit boundary */ + dcbs = fdma->dcbs_base; + dcbs_dma = fdma->dcbs_dma_base; + if (!IS_ALIGNED(dcbs_dma, 4)) { + adjust = dcbs_dma & 0x3; + dcbs_dma = ALIGN(dcbs_dma, 4); + dcbs = (void *)dcbs + adjust; + } + + /* TX queue */ + fdma->tx_ring.dcbs = dcbs; + fdma->tx_ring.dcbs_dma = dcbs_dma; + spin_lock_init(&fdma->tx_ring.xmit_lock); + + /* RX queue */ + fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE; + fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE; + ret = ocelot_fdma_alloc_rx_buffs(ocelot, + ocelot_fdma_tx_ring_free(fdma)); + if (ret) { + ocelot_fdma_free_rx_ring(ocelot); + return ret; + } + + /* Set the last DCB LLP as NULL, this is normally done when restarting + * the RX chan, but this is for the first run + */ + ocelot_fdma_rx_set_llp(&fdma->rx_ring); + + return 0; +} + +void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + dev->needed_headroom = OCELOT_TAG_LEN; + dev->needed_tailroom = ETH_FCS_LEN; + + if (fdma->ndev) + return; + + fdma->ndev = dev; + netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll, + OCELOT_FDMA_WEIGHT); +} + +void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + if (fdma->ndev == dev) { + netif_napi_del(&fdma->napi); + fdma->ndev = NULL; + } +} + +void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot) +{ + struct device *dev = ocelot->dev; + struct ocelot_fdma *fdma; + int ret; + + fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL); + if (!fdma) + return; + + ocelot->fdma = fdma; + ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + + fdma->ocelot = ocelot; + fdma->irq = platform_get_irq_byname(pdev, "fdma"); + ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0, + dev_name(dev), ocelot); + if (ret) + goto err_free_fdma; + + ret = ocelot_fdma_rings_alloc(ocelot); + if (ret) + goto err_free_irq; + + static_branch_enable(&ocelot_fdma_enabled); + + return; + +err_free_irq: + devm_free_irq(dev, fdma->irq, fdma); +err_free_fdma: + devm_kfree(dev, fdma); + + ocelot->fdma = NULL; +} + +void ocelot_fdma_start(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + /* Reconfigure for extraction and injection using DMA */ + ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0); + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0); + + ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN)); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA, + BIT(MSCC_FDMA_XTR_CHAN)); + 
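/* Editor's note, not part of the patch: LLP interrupts (a DCB chain
 * was consumed) are enabled on both channels because TX cleanup and RX
 * restart both key off them, while frame interrupts are enabled only on
 * the extraction channel, where a completed ingress frame must kick the
 * NAPI poll loop; the injection side has no per-frame work to do.
 */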
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN)); + + napi_enable(&fdma->napi); + + ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma, + MSCC_FDMA_XTR_CHAN); +} + +void ocelot_fdma_deinit(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS, + BIT(MSCC_FDMA_XTR_CHAN)); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS, + BIT(MSCC_FDMA_INJ_CHAN)); + napi_synchronize(&fdma->napi); + napi_disable(&fdma->napi); + + ocelot_fdma_free_rx_ring(ocelot); + ocelot_fdma_free_tx_ring(ocelot); +} diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.h b/drivers/net/ethernet/mscc/ocelot_fdma.h new file mode 100644 index 000000000000..2fc8e1dd7230 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_fdma.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi SoCs FDMA driver + * + * Copyright (c) 2021 Microchip + */ +#ifndef _MSCC_OCELOT_FDMA_H_ +#define _MSCC_OCELOT_FDMA_H_ + +#include "ocelot.h" + +#define MSCC_FDMA_DCB_STAT_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define MSCC_FDMA_DCB_STAT_BLOCKO_M GENMASK(31, 20) +#define MSCC_FDMA_DCB_STAT_BLOCKO_X(x) (((x) & GENMASK(31, 20)) >> 20) +#define MSCC_FDMA_DCB_STAT_PD BIT(19) +#define MSCC_FDMA_DCB_STAT_ABORT BIT(18) +#define MSCC_FDMA_DCB_STAT_EOF BIT(17) +#define MSCC_FDMA_DCB_STAT_SOF BIT(16) +#define MSCC_FDMA_DCB_STAT_BLOCKL_M GENMASK(15, 0) +#define MSCC_FDMA_DCB_STAT_BLOCKL(x) ((x) & GENMASK(15, 0)) + +#define MSCC_FDMA_DCB_LLP(x) ((x) * 4 + 0x0) +#define MSCC_FDMA_DCB_LLP_PREV(x) ((x) * 4 + 0xA0) +#define MSCC_FDMA_CH_SAFE 0xcc +#define MSCC_FDMA_CH_ACTIVATE 0xd0 +#define MSCC_FDMA_CH_DISABLE 0xd4 +#define MSCC_FDMA_CH_FORCEDIS 0xd8 +#define MSCC_FDMA_EVT_ERR 0x164 +#define MSCC_FDMA_EVT_ERR_CODE 0x168 +#define MSCC_FDMA_INTR_LLP 0x16c +#define MSCC_FDMA_INTR_LLP_ENA 0x170 +#define MSCC_FDMA_INTR_FRM 0x174 +#define MSCC_FDMA_INTR_FRM_ENA 0x178 +#define MSCC_FDMA_INTR_ENA 0x184 +#define MSCC_FDMA_INTR_IDENT 0x188 + +#define MSCC_FDMA_INJ_CHAN 2 +#define MSCC_FDMA_XTR_CHAN 0 + +#define OCELOT_FDMA_WEIGHT 32 + +#define OCELOT_FDMA_CH_SAFE_TIMEOUT_US 10 + +#define OCELOT_FDMA_RX_RING_SIZE 512 +#define OCELOT_FDMA_TX_RING_SIZE 128 + +#define OCELOT_FDMA_RX_DCB_SIZE (OCELOT_FDMA_RX_RING_SIZE * \ + sizeof(struct ocelot_fdma_dcb)) +#define OCELOT_FDMA_TX_DCB_SIZE (OCELOT_FDMA_TX_RING_SIZE * \ + sizeof(struct ocelot_fdma_dcb)) +/* +4 allows for word alignment after allocation */ +#define OCELOT_DCBS_HW_ALLOC_SIZE (OCELOT_FDMA_RX_DCB_SIZE + \ + OCELOT_FDMA_TX_DCB_SIZE + \ + 4) + +#define OCELOT_FDMA_RX_SIZE (PAGE_SIZE / 2) + +#define OCELOT_FDMA_SKBFRAG_OVR (4 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define OCELOT_FDMA_RXB_SIZE ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4) +#define OCELOT_FDMA_SKBFRAG_SIZE (OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR) + +DECLARE_STATIC_KEY_FALSE(ocelot_fdma_enabled); + +struct ocelot_fdma_dcb { + u32 llp; + u32 datap; + u32 datal; + u32 stat; +} __packed; + +/** + * struct ocelot_fdma_tx_buf - TX buffer structure + * @skb: SKB currently used in the corresponding DCB. + * @dma_addr: SKB DMA mapped address. 
+ */ +struct ocelot_fdma_tx_buf { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); +}; + +/** + * struct ocelot_fdma_tx_ring - TX ring description of DCBs + * + * @dcbs: DCBs allocated for the ring + * @dcbs_dma: DMA base address of the DCBs + * @bufs: List of TX buffer associated to the DCBs + * @xmit_lock: lock for concurrent xmit access + * @next_to_clean: Next DCB to be cleaned in tx_cleanup + * @next_to_use: Next available DCB to send SKB + */ +struct ocelot_fdma_tx_ring { + struct ocelot_fdma_dcb *dcbs; + dma_addr_t dcbs_dma; + struct ocelot_fdma_tx_buf bufs[OCELOT_FDMA_TX_RING_SIZE]; + /* Protect concurrent xmit calls */ + spinlock_t xmit_lock; + u16 next_to_clean; + u16 next_to_use; +}; + +/** + * struct ocelot_fdma_rx_buf - RX buffer structure + * @page: Struct page used in this buffer + * @page_offset: Current page offset (either 0 or PAGE_SIZE/2) + * @dma_addr: DMA address of the page + */ +struct ocelot_fdma_rx_buf { + struct page *page; + u32 page_offset; + dma_addr_t dma_addr; +}; + +/** + * struct ocelot_fdma_rx_ring - TX ring description of DCBs + * + * @dcbs: DCBs allocated for the ring + * @dcbs_dma: DMA base address of the DCBs + * @bufs: List of RX buffer associated to the DCBs + * @skb: SKB currently received by the netdev + * @next_to_clean: Next DCB to be cleaned NAPI polling + * @next_to_use: Next available DCB to send SKB + * @next_to_alloc: Next buffer that needs to be allocated (page reuse or alloc) + */ +struct ocelot_fdma_rx_ring { + struct ocelot_fdma_dcb *dcbs; + dma_addr_t dcbs_dma; + struct ocelot_fdma_rx_buf bufs[OCELOT_FDMA_RX_RING_SIZE]; + struct sk_buff *skb; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; +}; + +/** + * struct ocelot_fdma - FDMA context + * + * @irq: FDMA interrupt + * @ndev: Net device used to initialize NAPI + * @dcbs_base: Memory coherent DCBs + * @dcbs_dma_base: DMA base address of memory coherent DCBs + * @tx_ring: Injection ring + * @rx_ring: Extraction ring + * @napi: NAPI context + * @ocelot: Back-pointer to ocelot struct + */ +struct ocelot_fdma { + int irq; + struct net_device *ndev; + struct ocelot_fdma_dcb *dcbs_base; + dma_addr_t dcbs_dma_base; + struct ocelot_fdma_tx_ring tx_ring; + struct ocelot_fdma_rx_ring rx_ring; + struct napi_struct napi; + struct ocelot *ocelot; +}; + +void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot); +void ocelot_fdma_start(struct ocelot *ocelot); +void ocelot_fdma_deinit(struct ocelot *ocelot); +int ocelot_fdma_inject_frame(struct ocelot *fdma, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev); +void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev); +void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, + struct net_device *dev); + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 0fcf359a6975..8115c3db252e 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -15,6 +15,7 @@ #include <net/pkt_cls.h> #include "ocelot.h" #include "ocelot_vcap.h" +#include "ocelot_fdma.h" #define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP @@ -457,7 +458,8 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) int port = priv->chip_port; u32 rew_op = 0; - if (!ocelot_can_inject(ocelot, 0)) + if (!static_branch_unlikely(&ocelot_fdma_enabled) && + !ocelot_can_inject(ocelot, 0)) return NETDEV_TX_BUSY; /* Check if timestamping is needed */ @@ -475,9 +477,13 @@ static netdev_tx_t 
ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) rew_op = ocelot_ptp_rew_op(skb); } - ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); + if (static_branch_unlikely(&ocelot_fdma_enabled)) { + ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev); + } else { + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); - kfree_skb(skb); + consume_skb(skb); + } return NETDEV_TX_OK; } @@ -764,10 +770,23 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return phy_mii_ioctl(dev->phydev, ifr, cmd); } +static int ocelot_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + + ocelot_port_set_maxlen(ocelot, priv->chip_port, new_mtu); + WRITE_ONCE(dev->mtu, new_mtu); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, .ndo_start_xmit = ocelot_port_xmit, + .ndo_change_mtu = ocelot_change_mtu, .ndo_set_rx_mode = ocelot_set_rx_mode, .ndo_set_mac_address = ocelot_port_set_mac_address, .ndo_get_stats64 = ocelot_get_stats64, @@ -1670,12 +1689,16 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, dev->netdev_ops = &ocelot_port_netdev_ops; dev->ethtool_ops = &ocelot_ethtool_ops; + dev->max_mtu = OCELOT_JUMBO_MTU; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS | NETIF_F_HW_TC; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; - eth_hw_addr_gen(dev, ocelot->base_mac, port); + err = of_get_ethdev_address(portnp, dev); + if (err) + eth_hw_addr_gen(dev, ocelot->base_mac, port); + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); @@ -1685,14 +1708,20 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, if (err) goto out; + if (ocelot->fdma) + ocelot_fdma_netdev_init(ocelot, dev); + err = register_netdev(dev); if (err) { dev_err(ocelot->dev, "register_netdev failed\n"); - goto out; + goto out_fdma_deinit; } return 0; +out_fdma_deinit: + if (ocelot->fdma) + ocelot_fdma_netdev_deinit(ocelot, dev); out: ocelot->ports[port] = NULL; free_netdev(dev); @@ -1705,9 +1734,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port) struct ocelot_port_private *priv = container_of(ocelot_port, struct ocelot_port_private, port); + struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_fdma *fdma = ocelot->fdma; unregister_netdev(priv->dev); + if (fdma) + ocelot_fdma_netdev_deinit(ocelot, priv->dev); + if (priv->phylink) { rtnl_lock(); phylink_disconnect_phy(priv->phylink); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index cd3eb101f159..4f4a495a60ad 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -18,316 +18,24 @@ #include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot_hsio.h> +#include <soc/mscc/vsc7514_regs.h> +#include "ocelot_fdma.h" #include "ocelot.h" #define VSC7514_VCAP_POLICER_BASE 128 #define VSC7514_VCAP_POLICER_MAX 191 -static const u32 ocelot_ana_regmap[] = { - REG(ANA_ADVLEARN, 0x009000), - REG(ANA_VLANMASK, 0x009004), - REG(ANA_PORT_B_DOMAIN, 0x009008), - REG(ANA_ANAGEFIL, 0x00900c), - REG(ANA_ANEVENTS, 0x009010), - REG(ANA_STORMLIMIT_BURST, 0x009014), - REG(ANA_STORMLIMIT_CFG, 0x009018), - REG(ANA_ISOLATED_PORTS, 0x009028), - REG(ANA_COMMUNITY_PORTS, 0x00902c), - 
REG(ANA_AUTOAGE, 0x009030), - REG(ANA_MACTOPTIONS, 0x009034), - REG(ANA_LEARNDISC, 0x009038), - REG(ANA_AGENCTRL, 0x00903c), - REG(ANA_MIRRORPORTS, 0x009040), - REG(ANA_EMIRRORPORTS, 0x009044), - REG(ANA_FLOODING, 0x009048), - REG(ANA_FLOODING_IPMC, 0x00904c), - REG(ANA_SFLOW_CFG, 0x009050), - REG(ANA_PORT_MODE, 0x009080), - REG(ANA_PGID_PGID, 0x008c00), - REG(ANA_TABLES_ANMOVED, 0x008b30), - REG(ANA_TABLES_MACHDATA, 0x008b34), - REG(ANA_TABLES_MACLDATA, 0x008b38), - REG(ANA_TABLES_MACACCESS, 0x008b3c), - REG(ANA_TABLES_MACTINDX, 0x008b40), - REG(ANA_TABLES_VLANACCESS, 0x008b44), - REG(ANA_TABLES_VLANTIDX, 0x008b48), - REG(ANA_TABLES_ISDXACCESS, 0x008b4c), - REG(ANA_TABLES_ISDXTIDX, 0x008b50), - REG(ANA_TABLES_ENTRYLIM, 0x008b00), - REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), - REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), - REG(ANA_MSTI_STATE, 0x008e00), - REG(ANA_PORT_VLAN_CFG, 0x007000), - REG(ANA_PORT_DROP_CFG, 0x007004), - REG(ANA_PORT_QOS_CFG, 0x007008), - REG(ANA_PORT_VCAP_CFG, 0x00700c), - REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), - REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), - REG(ANA_PORT_PCP_DEI_MAP, 0x007020), - REG(ANA_PORT_CPU_FWD_CFG, 0x007060), - REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), - REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), - REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), - REG(ANA_PORT_PORT_CFG, 0x007070), - REG(ANA_PORT_POL_CFG, 0x007074), - REG(ANA_PORT_PTP_CFG, 0x007078), - REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), - REG(ANA_OAM_UPM_LM_CNT, 0x007c00), - REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), - REG(ANA_PFC_PFC_CFG, 0x008800), - REG(ANA_PFC_PFC_TIMER, 0x008804), - REG(ANA_IPT_OAM_MEP_CFG, 0x008000), - REG(ANA_IPT_IPT, 0x008004), - REG(ANA_PPT_PPT, 0x008ac0), - REG(ANA_FID_MAP_FID_MAP, 0x000000), - REG(ANA_AGGR_CFG, 0x0090b4), - REG(ANA_CPUQ_CFG, 0x0090b8), - REG(ANA_CPUQ_CFG2, 0x0090bc), - REG(ANA_CPUQ_8021_CFG, 0x0090c0), - REG(ANA_DSCP_CFG, 0x009100), - REG(ANA_DSCP_REWR_CFG, 0x009200), - REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), - REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), - REG(ANA_VRAP_CFG, 0x009280), - REG(ANA_VRAP_HDR_DATA, 0x009284), - REG(ANA_VRAP_HDR_MASK, 0x009288), - REG(ANA_DISCARD_CFG, 0x00928c), - REG(ANA_FID_CFG, 0x009290), - REG(ANA_POL_PIR_CFG, 0x004000), - REG(ANA_POL_CIR_CFG, 0x004004), - REG(ANA_POL_MODE_CFG, 0x004008), - REG(ANA_POL_PIR_STATE, 0x00400c), - REG(ANA_POL_CIR_STATE, 0x004010), - REG(ANA_POL_STATE, 0x004014), - REG(ANA_POL_FLOWC, 0x008b80), - REG(ANA_POL_HYST, 0x008bec), - REG(ANA_POL_MISC_CFG, 0x008bf0), -}; - -static const u32 ocelot_qs_regmap[] = { - REG(QS_XTR_GRP_CFG, 0x000000), - REG(QS_XTR_RD, 0x000008), - REG(QS_XTR_FRM_PRUNING, 0x000010), - REG(QS_XTR_FLUSH, 0x000018), - REG(QS_XTR_DATA_PRESENT, 0x00001c), - REG(QS_XTR_CFG, 0x000020), - REG(QS_INJ_GRP_CFG, 0x000024), - REG(QS_INJ_WR, 0x00002c), - REG(QS_INJ_CTRL, 0x000034), - REG(QS_INJ_STATUS, 0x00003c), - REG(QS_INJ_ERR, 0x000040), - REG(QS_INH_DBG, 0x000048), -}; - -static const u32 ocelot_qsys_regmap[] = { - REG(QSYS_PORT_MODE, 0x011200), - REG(QSYS_SWITCH_PORT_MODE, 0x011234), - REG(QSYS_STAT_CNT_CFG, 0x011264), - REG(QSYS_EEE_CFG, 0x011268), - REG(QSYS_EEE_THRES, 0x011294), - REG(QSYS_IGR_NO_SHARING, 0x011298), - REG(QSYS_EGR_NO_SHARING, 0x01129c), - REG(QSYS_SW_STATUS, 0x0112a0), - REG(QSYS_EXT_CPU_CFG, 0x0112d0), - REG(QSYS_PAD_CFG, 0x0112d4), - REG(QSYS_CPU_GROUP_MAP, 0x0112d8), - REG(QSYS_QMAP, 0x0112dc), - REG(QSYS_ISDX_SGRP, 0x011400), - REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), - REG(QSYS_TFRM_MISC, 0x011310), - REG(QSYS_TFRM_PORT_DLY, 0x011314), - REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), - 
REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), - REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), - REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), - REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), - REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), - REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), - REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), - REG(QSYS_RED_PROFILE, 0x011338), - REG(QSYS_RES_QOS_MODE, 0x011378), - REG(QSYS_RES_CFG, 0x012000), - REG(QSYS_RES_STAT, 0x012004), - REG(QSYS_EGR_DROP_MODE, 0x01137c), - REG(QSYS_EQ_CTRL, 0x011380), - REG(QSYS_EVENTS_CORE, 0x011384), - REG(QSYS_CIR_CFG, 0x000000), - REG(QSYS_EIR_CFG, 0x000004), - REG(QSYS_SE_CFG, 0x000008), - REG(QSYS_SE_DWRR_CFG, 0x00000c), - REG(QSYS_SE_CONNECT, 0x00003c), - REG(QSYS_SE_DLB_SENSE, 0x000040), - REG(QSYS_CIR_STATE, 0x000044), - REG(QSYS_EIR_STATE, 0x000048), - REG(QSYS_SE_STATE, 0x00004c), - REG(QSYS_HSCH_MISC_CFG, 0x011388), -}; - -static const u32 ocelot_rew_regmap[] = { - REG(REW_PORT_VLAN_CFG, 0x000000), - REG(REW_TAG_CFG, 0x000004), - REG(REW_PORT_CFG, 0x000008), - REG(REW_DSCP_CFG, 0x00000c), - REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), - REG(REW_PTP_CFG, 0x000050), - REG(REW_PTP_DLY1_CFG, 0x000054), - REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), - REG(REW_DSCP_REMAP_CFG, 0x000790), - REG(REW_STAT_CFG, 0x000890), - REG(REW_PPT, 0x000680), -}; - -static const u32 ocelot_sys_regmap[] = { - REG(SYS_COUNT_RX_OCTETS, 0x000000), - REG(SYS_COUNT_RX_UNICAST, 0x000004), - REG(SYS_COUNT_RX_MULTICAST, 0x000008), - REG(SYS_COUNT_RX_BROADCAST, 0x00000c), - REG(SYS_COUNT_RX_SHORTS, 0x000010), - REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), - REG(SYS_COUNT_RX_JABBERS, 0x000018), - REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), - REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), - REG(SYS_COUNT_RX_64, 0x000024), - REG(SYS_COUNT_RX_65_127, 0x000028), - REG(SYS_COUNT_RX_128_255, 0x00002c), - REG(SYS_COUNT_RX_256_1023, 0x000030), - REG(SYS_COUNT_RX_1024_1526, 0x000034), - REG(SYS_COUNT_RX_1527_MAX, 0x000038), - REG(SYS_COUNT_RX_PAUSE, 0x00003c), - REG(SYS_COUNT_RX_CONTROL, 0x000040), - REG(SYS_COUNT_RX_LONGS, 0x000044), - REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), - REG(SYS_COUNT_TX_OCTETS, 0x000100), - REG(SYS_COUNT_TX_UNICAST, 0x000104), - REG(SYS_COUNT_TX_MULTICAST, 0x000108), - REG(SYS_COUNT_TX_BROADCAST, 0x00010c), - REG(SYS_COUNT_TX_COLLISION, 0x000110), - REG(SYS_COUNT_TX_DROPS, 0x000114), - REG(SYS_COUNT_TX_PAUSE, 0x000118), - REG(SYS_COUNT_TX_64, 0x00011c), - REG(SYS_COUNT_TX_65_127, 0x000120), - REG(SYS_COUNT_TX_128_511, 0x000124), - REG(SYS_COUNT_TX_512_1023, 0x000128), - REG(SYS_COUNT_TX_1024_1526, 0x00012c), - REG(SYS_COUNT_TX_1527_MAX, 0x000130), - REG(SYS_COUNT_TX_AGING, 0x000170), - REG(SYS_RESET_CFG, 0x000508), - REG(SYS_CMID, 0x00050c), - REG(SYS_VLAN_ETYPE_CFG, 0x000510), - REG(SYS_PORT_MODE, 0x000514), - REG(SYS_FRONT_PORT_MODE, 0x000548), - REG(SYS_FRM_AGING, 0x000574), - REG(SYS_STAT_CFG, 0x000578), - REG(SYS_SW_STATUS, 0x00057c), - REG(SYS_MISC_CFG, 0x0005ac), - REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), - REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), - REG(SYS_CM_ADDR, 0x000500), - REG(SYS_CM_DATA, 0x000504), - REG(SYS_PAUSE_CFG, 0x000608), - REG(SYS_PAUSE_TOT_CFG, 0x000638), - REG(SYS_ATOP, 0x00063c), - REG(SYS_ATOP_TOT_CFG, 0x00066c), - REG(SYS_MAC_FC_CFG, 0x000670), - REG(SYS_MMGT, 0x00069c), - REG(SYS_MMGT_FAST, 0x0006a0), - REG(SYS_EVENTS_DIF, 0x0006a4), - REG(SYS_EVENTS_CORE, 0x0006b4), - REG(SYS_CNT, 0x000000), - REG(SYS_PTP_STATUS, 0x0006b8), - REG(SYS_PTP_TXSTAMP, 0x0006bc), - REG(SYS_PTP_NXT, 0x0006c0), - REG(SYS_PTP_CFG, 0x0006c4), -}; - -static const u32 ocelot_vcap_regmap[] = { - /* VCAP_CORE_CFG */ 
- REG(VCAP_CORE_UPDATE_CTRL, 0x000000), - REG(VCAP_CORE_MV_CFG, 0x000004), - /* VCAP_CORE_CACHE */ - REG(VCAP_CACHE_ENTRY_DAT, 0x000008), - REG(VCAP_CACHE_MASK_DAT, 0x000108), - REG(VCAP_CACHE_ACTION_DAT, 0x000208), - REG(VCAP_CACHE_CNT_DAT, 0x000308), - REG(VCAP_CACHE_TG_DAT, 0x000388), - /* VCAP_CONST */ - REG(VCAP_CONST_VCAP_VER, 0x000398), - REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c), - REG(VCAP_CONST_ENTRY_CNT, 0x0003a0), - REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4), - REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8), - REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac), - REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0), - REG(VCAP_CONST_CNT_WIDTH, 0x0003b4), - REG(VCAP_CONST_CORE_CNT, 0x0003b8), - REG(VCAP_CONST_IF_CNT, 0x0003bc), -}; - -static const u32 ocelot_ptp_regmap[] = { - REG(PTP_PIN_CFG, 0x000000), - REG(PTP_PIN_TOD_SEC_MSB, 0x000004), - REG(PTP_PIN_TOD_SEC_LSB, 0x000008), - REG(PTP_PIN_TOD_NSEC, 0x00000c), - REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), - REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), - REG(PTP_CFG_MISC, 0x0000a0), - REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), - REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), -}; - -static const u32 ocelot_dev_gmii_regmap[] = { - REG(DEV_CLOCK_CFG, 0x0), - REG(DEV_PORT_MISC, 0x4), - REG(DEV_EVENTS, 0x8), - REG(DEV_EEE_CFG, 0xc), - REG(DEV_RX_PATH_DELAY, 0x10), - REG(DEV_TX_PATH_DELAY, 0x14), - REG(DEV_PTP_PREDICT_CFG, 0x18), - REG(DEV_MAC_ENA_CFG, 0x1c), - REG(DEV_MAC_MODE_CFG, 0x20), - REG(DEV_MAC_MAXLEN_CFG, 0x24), - REG(DEV_MAC_TAGS_CFG, 0x28), - REG(DEV_MAC_ADV_CHK_CFG, 0x2c), - REG(DEV_MAC_IFG_CFG, 0x30), - REG(DEV_MAC_HDX_CFG, 0x34), - REG(DEV_MAC_DBG_CFG, 0x38), - REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), - REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), - REG(DEV_MAC_STICKY, 0x44), - REG(PCS1G_CFG, 0x48), - REG(PCS1G_MODE_CFG, 0x4c), - REG(PCS1G_SD_CFG, 0x50), - REG(PCS1G_ANEG_CFG, 0x54), - REG(PCS1G_ANEG_NP_CFG, 0x58), - REG(PCS1G_LB_CFG, 0x5c), - REG(PCS1G_DBG_CFG, 0x60), - REG(PCS1G_CDET_CFG, 0x64), - REG(PCS1G_ANEG_STATUS, 0x68), - REG(PCS1G_ANEG_NP_STATUS, 0x6c), - REG(PCS1G_LINK_STATUS, 0x70), - REG(PCS1G_LINK_DOWN_CNT, 0x74), - REG(PCS1G_STICKY, 0x78), - REG(PCS1G_DEBUG_STATUS, 0x7c), - REG(PCS1G_LPI_CFG, 0x80), - REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84), - REG(PCS1G_LPI_STATUS, 0x88), - REG(PCS1G_TSTPAT_MODE_CFG, 0x8c), - REG(PCS1G_TSTPAT_STATUS, 0x90), - REG(DEV_PCS_FX100_CFG, 0x94), - REG(DEV_PCS_FX100_STATUS, 0x98), -}; - static const u32 *ocelot_regmap[TARGET_MAX] = { - [ANA] = ocelot_ana_regmap, - [QS] = ocelot_qs_regmap, - [QSYS] = ocelot_qsys_regmap, - [REW] = ocelot_rew_regmap, - [SYS] = ocelot_sys_regmap, - [S0] = ocelot_vcap_regmap, - [S1] = ocelot_vcap_regmap, - [S2] = ocelot_vcap_regmap, - [PTP] = ocelot_ptp_regmap, - [DEV_GMII] = ocelot_dev_gmii_regmap, + [ANA] = vsc7514_ana_regmap, + [QS] = vsc7514_qs_regmap, + [QSYS] = vsc7514_qsys_regmap, + [REW] = vsc7514_rew_regmap, + [SYS] = vsc7514_sys_regmap, + [S0] = vsc7514_vcap_regmap, + [S1] = vsc7514_vcap_regmap, + [S2] = vsc7514_vcap_regmap, + [PTP] = vsc7514_ptp_regmap, + [DEV_GMII] = vsc7514_dev_gmii_regmap, }; static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { @@ -636,211 +344,6 @@ static const struct ocelot_ops ocelot_ops = { .netdev_to_port = ocelot_netdev_to_port, }; -static const struct vcap_field vsc7514_vcap_es0_keys[] = { - [VCAP_ES0_EGR_PORT] = { 0, 4}, - [VCAP_ES0_IGR_PORT] = { 4, 4}, - [VCAP_ES0_RSV] = { 8, 2}, - [VCAP_ES0_L2_MC] = { 10, 1}, - [VCAP_ES0_L2_BC] = { 11, 1}, - [VCAP_ES0_VID] = { 12, 12}, - [VCAP_ES0_DP] = { 24, 1}, - [VCAP_ES0_PCP] = { 25, 3}, -}; - -static const struct vcap_field 
vsc7514_vcap_es0_actions[] = { - [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2}, - [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1}, - [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2}, - [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1}, - [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2}, - [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2}, - [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2}, - [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1}, - [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2}, - [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2}, - [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12}, - [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3}, - [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1}, - [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12}, - [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3}, - [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1}, - [VCAP_ES0_ACT_RSV] = { 49, 24}, - [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1}, -}; - -static const struct vcap_field vsc7514_vcap_is1_keys[] = { - [VCAP_IS1_HK_TYPE] = { 0, 1}, - [VCAP_IS1_HK_LOOKUP] = { 1, 2}, - [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12}, - [VCAP_IS1_HK_RSV] = { 15, 9}, - [VCAP_IS1_HK_OAM_Y1731] = { 24, 1}, - [VCAP_IS1_HK_L2_MC] = { 25, 1}, - [VCAP_IS1_HK_L2_BC] = { 26, 1}, - [VCAP_IS1_HK_IP_MC] = { 27, 1}, - [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1}, - [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1}, - [VCAP_IS1_HK_TPID] = { 30, 1}, - [VCAP_IS1_HK_VID] = { 31, 12}, - [VCAP_IS1_HK_DEI] = { 43, 1}, - [VCAP_IS1_HK_PCP] = { 44, 3}, - /* Specific Fields for IS1 Half Key S1_NORMAL */ - [VCAP_IS1_HK_L2_SMAC] = { 47, 48}, - [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1}, - [VCAP_IS1_HK_ETYPE] = { 96, 16}, - [VCAP_IS1_HK_IP_SNAP] = {112, 1}, - [VCAP_IS1_HK_IP4] = {113, 1}, - /* Layer-3 Information */ - [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1}, - [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1}, - [VCAP_IS1_HK_L3_OPTIONS] = {116, 1}, - [VCAP_IS1_HK_L3_DSCP] = {117, 6}, - [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32}, - /* Layer-4 Information */ - [VCAP_IS1_HK_TCP_UDP] = {155, 1}, - [VCAP_IS1_HK_TCP] = {156, 1}, - [VCAP_IS1_HK_L4_SPORT] = {157, 16}, - [VCAP_IS1_HK_L4_RNG] = {173, 8}, - /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */ - [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1}, - [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12}, - [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1}, - [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3}, - [VCAP_IS1_HK_IP4_IP4] = { 64, 1}, - [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1}, - [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1}, - [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1}, - [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6}, - [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32}, - [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32}, - [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8}, - [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1}, - [VCAP_IS1_HK_IP4_TCP] = {147, 1}, - [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8}, - [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32}, -}; - -static const struct vcap_field vsc7514_vcap_is1_actions[] = { - [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1}, - [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6}, - [VCAP_IS1_ACT_QOS_ENA] = { 7, 1}, - [VCAP_IS1_ACT_QOS_VAL] = { 8, 3}, - [VCAP_IS1_ACT_DP_ENA] = { 11, 1}, - [VCAP_IS1_ACT_DP_VAL] = { 12, 1}, - [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8}, - [VCAP_IS1_ACT_PAG_VAL] = { 21, 8}, - [VCAP_IS1_ACT_RSV] = { 29, 9}, - /* The fields below are incorrectly shifted by 2 in the manual */ - [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1}, - [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12}, - [VCAP_IS1_ACT_FID_SEL] = { 51, 2}, - [VCAP_IS1_ACT_FID_VAL] = { 53, 13}, - [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1}, - [VCAP_IS1_ACT_PCP_VAL] = { 67, 3}, - [VCAP_IS1_ACT_DEI_VAL] = { 70, 1}, - [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1}, - [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2}, - 
[VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4}, - [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1}, -}; - -static const struct vcap_field vsc7514_vcap_is2_keys[] = { - /* Common: 46 bits */ - [VCAP_IS2_TYPE] = { 0, 4}, - [VCAP_IS2_HK_FIRST] = { 4, 1}, - [VCAP_IS2_HK_PAG] = { 5, 8}, - [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12}, - [VCAP_IS2_HK_RSV2] = { 25, 1}, - [VCAP_IS2_HK_HOST_MATCH] = { 26, 1}, - [VCAP_IS2_HK_L2_MC] = { 27, 1}, - [VCAP_IS2_HK_L2_BC] = { 28, 1}, - [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1}, - [VCAP_IS2_HK_VID] = { 30, 12}, - [VCAP_IS2_HK_DEI] = { 42, 1}, - [VCAP_IS2_HK_PCP] = { 43, 3}, - /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ - [VCAP_IS2_HK_L2_DMAC] = { 46, 48}, - [VCAP_IS2_HK_L2_SMAC] = { 94, 48}, - /* MAC_ETYPE (TYPE=000) */ - [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16}, - [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16}, - [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8}, - [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3}, - /* MAC_LLC (TYPE=001) */ - [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40}, - /* MAC_SNAP (TYPE=010) */ - [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40}, - /* MAC_ARP (TYPE=011) */ - [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48}, - [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1}, - [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1}, - [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1}, - [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1}, - [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1}, - [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1}, - [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2}, - [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32}, - [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32}, - [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1}, - /* IP4_TCP_UDP / IP4_OTHER common */ - [VCAP_IS2_HK_IP4] = { 46, 1}, - [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1}, - [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1}, - [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1}, - [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1}, - [VCAP_IS2_HK_L3_TOS] = { 51, 8}, - [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32}, - [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32}, - [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1}, - /* IP4_TCP_UDP (TYPE=100) */ - [VCAP_IS2_HK_TCP] = {124, 1}, - [VCAP_IS2_HK_L4_DPORT] = {125, 16}, - [VCAP_IS2_HK_L4_SPORT] = {141, 16}, - [VCAP_IS2_HK_L4_RNG] = {157, 8}, - [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1}, - [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1}, - [VCAP_IS2_HK_L4_FIN] = {167, 1}, - [VCAP_IS2_HK_L4_SYN] = {168, 1}, - [VCAP_IS2_HK_L4_RST] = {169, 1}, - [VCAP_IS2_HK_L4_PSH] = {170, 1}, - [VCAP_IS2_HK_L4_ACK] = {171, 1}, - [VCAP_IS2_HK_L4_URG] = {172, 1}, - [VCAP_IS2_HK_L4_1588_DOM] = {173, 8}, - [VCAP_IS2_HK_L4_1588_VER] = {181, 4}, - /* IP4_OTHER (TYPE=101) */ - [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8}, - [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56}, - /* IP6_STD (TYPE=110) */ - [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1}, - [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128}, - [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8}, - /* OAM (TYPE=111) */ - [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7}, - [VCAP_IS2_HK_OAM_VER] = {149, 5}, - [VCAP_IS2_HK_OAM_OPCODE] = {154, 8}, - [VCAP_IS2_HK_OAM_FLAGS] = {162, 8}, - [VCAP_IS2_HK_OAM_MEPID] = {170, 16}, - [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1}, - [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1}, -}; - -static const struct vcap_field vsc7514_vcap_is2_actions[] = { - [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1}, - [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1}, - [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3}, - [VCAP_IS2_ACT_MASK_MODE] = { 5, 2}, - [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1}, - [VCAP_IS2_ACT_LRN_DIS] = { 8, 1}, - [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1}, - [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9}, - 
[VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1}, - [VCAP_IS2_ACT_PORT_MASK] = { 20, 11}, - [VCAP_IS2_ACT_REW_OP] = { 31, 9}, - [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1}, - [VCAP_IS2_ACT_RSV] = { 41, 2}, - [VCAP_IS2_ACT_ACL_ID] = { 43, 6}, - [VCAP_IS2_ACT_HIT_CNT] = { 49, 32}, -}; - static struct vcap_props vsc7514_vcap_props[] = { [VCAP_ES0] = { .action_type_width = 0, @@ -1048,6 +551,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) { S1, "s1" }, { S2, "s2" }, { PTP, "ptp", 1 }, + { FDMA, "fdma", 1 }, }; if (!np && !pdev->dev.platform_data) @@ -1083,6 +587,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->targets[io_target[i].id] = target; } + if (ocelot->targets[FDMA]) + ocelot_fdma_init(pdev, ocelot); + hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); if (IS_ERR(hsio)) { dev_err(&pdev->dev, "missing hsio syscon\n"); @@ -1146,6 +653,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) goto out_ocelot_devlink_unregister; + if (ocelot->fdma) + ocelot_fdma_start(ocelot); + err = ocelot_devlink_sb_register(ocelot); if (err) goto out_ocelot_release_ports; @@ -1186,6 +696,8 @@ static int mscc_ocelot_remove(struct platform_device *pdev) { struct ocelot *ocelot = platform_get_drvdata(pdev); + if (ocelot->fdma) + ocelot_fdma_deinit(ocelot); devlink_unregister(ocelot->devlink); ocelot_deinit_timestamp(ocelot); ocelot_devlink_sb_unregister(ocelot); diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c new file mode 100644 index 000000000000..c2af4eb8ca5d --- /dev/null +++ b/drivers/net/ethernet/mscc/vsc7514_regs.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + * Copyright (c) 2021 Innovative Advantage + */ +#include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/vsc7514_regs.h> +#include "ocelot.h" + +const u32 vsc7514_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x009000), + REG(ANA_VLANMASK, 0x009004), + REG(ANA_PORT_B_DOMAIN, 0x009008), + REG(ANA_ANAGEFIL, 0x00900c), + REG(ANA_ANEVENTS, 0x009010), + REG(ANA_STORMLIMIT_BURST, 0x009014), + REG(ANA_STORMLIMIT_CFG, 0x009018), + REG(ANA_ISOLATED_PORTS, 0x009028), + REG(ANA_COMMUNITY_PORTS, 0x00902c), + REG(ANA_AUTOAGE, 0x009030), + REG(ANA_MACTOPTIONS, 0x009034), + REG(ANA_LEARNDISC, 0x009038), + REG(ANA_AGENCTRL, 0x00903c), + REG(ANA_MIRRORPORTS, 0x009040), + REG(ANA_EMIRRORPORTS, 0x009044), + REG(ANA_FLOODING, 0x009048), + REG(ANA_FLOODING_IPMC, 0x00904c), + REG(ANA_SFLOW_CFG, 0x009050), + REG(ANA_PORT_MODE, 0x009080), + REG(ANA_PGID_PGID, 0x008c00), + REG(ANA_TABLES_ANMOVED, 0x008b30), + REG(ANA_TABLES_MACHDATA, 0x008b34), + REG(ANA_TABLES_MACLDATA, 0x008b38), + REG(ANA_TABLES_MACACCESS, 0x008b3c), + REG(ANA_TABLES_MACTINDX, 0x008b40), + REG(ANA_TABLES_VLANACCESS, 0x008b44), + REG(ANA_TABLES_VLANTIDX, 0x008b48), + REG(ANA_TABLES_ISDXACCESS, 0x008b4c), + REG(ANA_TABLES_ISDXTIDX, 0x008b50), + REG(ANA_TABLES_ENTRYLIM, 0x008b00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), + REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), + REG(ANA_MSTI_STATE, 0x008e00), + REG(ANA_PORT_VLAN_CFG, 0x007000), + REG(ANA_PORT_DROP_CFG, 0x007004), + REG(ANA_PORT_QOS_CFG, 0x007008), + REG(ANA_PORT_VCAP_CFG, 0x00700c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), + REG(ANA_PORT_PCP_DEI_MAP, 0x007020), + REG(ANA_PORT_CPU_FWD_CFG, 0x007060), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), + 
REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), + REG(ANA_PORT_PORT_CFG, 0x007070), + REG(ANA_PORT_POL_CFG, 0x007074), + REG(ANA_PORT_PTP_CFG, 0x007078), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), + REG(ANA_OAM_UPM_LM_CNT, 0x007c00), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG(ANA_PFC_PFC_TIMER, 0x008804), + REG(ANA_IPT_OAM_MEP_CFG, 0x008000), + REG(ANA_IPT_IPT, 0x008004), + REG(ANA_PPT_PPT, 0x008ac0), + REG(ANA_FID_MAP_FID_MAP, 0x000000), + REG(ANA_AGGR_CFG, 0x0090b4), + REG(ANA_CPUQ_CFG, 0x0090b8), + REG(ANA_CPUQ_CFG2, 0x0090bc), + REG(ANA_CPUQ_8021_CFG, 0x0090c0), + REG(ANA_DSCP_CFG, 0x009100), + REG(ANA_DSCP_REWR_CFG, 0x009200), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), + REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), + REG(ANA_VRAP_CFG, 0x009280), + REG(ANA_VRAP_HDR_DATA, 0x009284), + REG(ANA_VRAP_HDR_MASK, 0x009288), + REG(ANA_DISCARD_CFG, 0x00928c), + REG(ANA_FID_CFG, 0x009290), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG(ANA_POL_STATE, 0x004014), + REG(ANA_POL_FLOWC, 0x008b80), + REG(ANA_POL_HYST, 0x008bec), + REG(ANA_POL_MISC_CFG, 0x008bf0), +}; +EXPORT_SYMBOL(vsc7514_ana_regmap); + +const u32 vsc7514_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG(QS_INH_DBG, 0x000048), +}; +EXPORT_SYMBOL(vsc7514_qs_regmap); + +const u32 vsc7514_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x011200), + REG(QSYS_SWITCH_PORT_MODE, 0x011234), + REG(QSYS_STAT_CNT_CFG, 0x011264), + REG(QSYS_EEE_CFG, 0x011268), + REG(QSYS_EEE_THRES, 0x011294), + REG(QSYS_IGR_NO_SHARING, 0x011298), + REG(QSYS_EGR_NO_SHARING, 0x01129c), + REG(QSYS_SW_STATUS, 0x0112a0), + REG(QSYS_EXT_CPU_CFG, 0x0112d0), + REG(QSYS_PAD_CFG, 0x0112d4), + REG(QSYS_CPU_GROUP_MAP, 0x0112d8), + REG(QSYS_QMAP, 0x0112dc), + REG(QSYS_ISDX_SGRP, 0x011400), + REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), + REG(QSYS_TFRM_MISC, 0x011310), + REG(QSYS_TFRM_PORT_DLY, 0x011314), + REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), + REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), + REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), + REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), + REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), + REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), + REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), + REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), + REG(QSYS_RED_PROFILE, 0x011338), + REG(QSYS_RES_QOS_MODE, 0x011378), + REG(QSYS_RES_CFG, 0x012000), + REG(QSYS_RES_STAT, 0x012004), + REG(QSYS_EGR_DROP_MODE, 0x01137c), + REG(QSYS_EQ_CTRL, 0x011380), + REG(QSYS_EVENTS_CORE, 0x011384), + REG(QSYS_CIR_CFG, 0x000000), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG(QSYS_SE_CONNECT, 0x00003c), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG(QSYS_SE_STATE, 0x00004c), + REG(QSYS_HSCH_MISC_CFG, 0x011388), +}; +EXPORT_SYMBOL(vsc7514_qsys_regmap); + +const u32 vsc7514_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 0x000050), + REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), + 
REG(REW_DSCP_REMAP_CFG, 0x000790), + REG(REW_STAT_CFG, 0x000890), + REG(REW_PPT, 0x000680), +}; +EXPORT_SYMBOL(vsc7514_rew_regmap); + +const u32 vsc7514_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_1023, 0x000030), + REG(SYS_COUNT_RX_1024_1526, 0x000034), + REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_PAUSE, 0x00003c), + REG(SYS_COUNT_RX_CONTROL, 0x000040), + REG(SYS_COUNT_RX_LONGS, 0x000044), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), + REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_511, 0x000124), + REG(SYS_COUNT_TX_512_1023, 0x000128), + REG(SYS_COUNT_TX_1024_1526, 0x00012c), + REG(SYS_COUNT_TX_1527_MAX, 0x000130), + REG(SYS_COUNT_TX_AGING, 0x000170), + REG(SYS_RESET_CFG, 0x000508), + REG(SYS_CMID, 0x00050c), + REG(SYS_VLAN_ETYPE_CFG, 0x000510), + REG(SYS_PORT_MODE, 0x000514), + REG(SYS_FRONT_PORT_MODE, 0x000548), + REG(SYS_FRM_AGING, 0x000574), + REG(SYS_STAT_CFG, 0x000578), + REG(SYS_SW_STATUS, 0x00057c), + REG(SYS_MISC_CFG, 0x0005ac), + REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), + REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), + REG(SYS_CM_ADDR, 0x000500), + REG(SYS_CM_DATA, 0x000504), + REG(SYS_PAUSE_CFG, 0x000608), + REG(SYS_PAUSE_TOT_CFG, 0x000638), + REG(SYS_ATOP, 0x00063c), + REG(SYS_ATOP_TOT_CFG, 0x00066c), + REG(SYS_MAC_FC_CFG, 0x000670), + REG(SYS_MMGT, 0x00069c), + REG(SYS_MMGT_FAST, 0x0006a0), + REG(SYS_EVENTS_DIF, 0x0006a4), + REG(SYS_EVENTS_CORE, 0x0006b4), + REG(SYS_CNT, 0x000000), + REG(SYS_PTP_STATUS, 0x0006b8), + REG(SYS_PTP_TXSTAMP, 0x0006bc), + REG(SYS_PTP_NXT, 0x0006c0), + REG(SYS_PTP_CFG, 0x0006c4), +}; +EXPORT_SYMBOL(vsc7514_sys_regmap); + +const u32 vsc7514_vcap_regmap[] = { + /* VCAP_CORE_CFG */ + REG(VCAP_CORE_UPDATE_CTRL, 0x000000), + REG(VCAP_CORE_MV_CFG, 0x000004), + /* VCAP_CORE_CACHE */ + REG(VCAP_CACHE_ENTRY_DAT, 0x000008), + REG(VCAP_CACHE_MASK_DAT, 0x000108), + REG(VCAP_CACHE_ACTION_DAT, 0x000208), + REG(VCAP_CACHE_CNT_DAT, 0x000308), + REG(VCAP_CACHE_TG_DAT, 0x000388), + /* VCAP_CONST */ + REG(VCAP_CONST_VCAP_VER, 0x000398), + REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c), + REG(VCAP_CONST_ENTRY_CNT, 0x0003a0), + REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4), + REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8), + REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac), + REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0), + REG(VCAP_CONST_CNT_WIDTH, 0x0003b4), + REG(VCAP_CONST_CORE_CNT, 0x0003b8), + REG(VCAP_CONST_IF_CNT, 0x0003bc), +}; +EXPORT_SYMBOL(vsc7514_vcap_regmap); + +const u32 vsc7514_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), + REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), + REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 
0x0000a8), +}; +EXPORT_SYMBOL(vsc7514_ptp_regmap); + +const u32 vsc7514_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG(PCS1G_CFG, 0x48), + REG(PCS1G_MODE_CFG, 0x4c), + REG(PCS1G_SD_CFG, 0x50), + REG(PCS1G_ANEG_CFG, 0x54), + REG(PCS1G_ANEG_NP_CFG, 0x58), + REG(PCS1G_LB_CFG, 0x5c), + REG(PCS1G_DBG_CFG, 0x60), + REG(PCS1G_CDET_CFG, 0x64), + REG(PCS1G_ANEG_STATUS, 0x68), + REG(PCS1G_ANEG_NP_STATUS, 0x6c), + REG(PCS1G_LINK_STATUS, 0x70), + REG(PCS1G_LINK_DOWN_CNT, 0x74), + REG(PCS1G_STICKY, 0x78), + REG(PCS1G_DEBUG_STATUS, 0x7c), + REG(PCS1G_LPI_CFG, 0x80), + REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84), + REG(PCS1G_LPI_STATUS, 0x88), + REG(PCS1G_TSTPAT_MODE_CFG, 0x8c), + REG(PCS1G_TSTPAT_STATUS, 0x90), + REG(DEV_PCS_FX100_CFG, 0x94), + REG(DEV_PCS_FX100_STATUS, 0x98), +}; +EXPORT_SYMBOL(vsc7514_dev_gmii_regmap); + +const struct vcap_field vsc7514_vcap_es0_keys[] = { + [VCAP_ES0_EGR_PORT] = { 0, 4 }, + [VCAP_ES0_IGR_PORT] = { 4, 4 }, + [VCAP_ES0_RSV] = { 8, 2 }, + [VCAP_ES0_L2_MC] = { 10, 1 }, + [VCAP_ES0_L2_BC] = { 11, 1 }, + [VCAP_ES0_VID] = { 12, 12 }, + [VCAP_ES0_DP] = { 24, 1 }, + [VCAP_ES0_PCP] = { 25, 3 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_es0_keys); + +const struct vcap_field vsc7514_vcap_es0_actions[] = { + [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 }, + [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 }, + [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 }, + [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1 }, + [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2 }, + [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2 }, + [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2 }, + [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1 }, + [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2 }, + [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2 }, + [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12 }, + [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3 }, + [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1 }, + [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12 }, + [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3 }, + [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1 }, + [VCAP_ES0_ACT_RSV] = { 49, 24 }, + [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_es0_actions); + +const struct vcap_field vsc7514_vcap_is1_keys[] = { + [VCAP_IS1_HK_TYPE] = { 0, 1 }, + [VCAP_IS1_HK_LOOKUP] = { 1, 2 }, + [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 }, + [VCAP_IS1_HK_RSV] = { 15, 9 }, + [VCAP_IS1_HK_OAM_Y1731] = { 24, 1 }, + [VCAP_IS1_HK_L2_MC] = { 25, 1 }, + [VCAP_IS1_HK_L2_BC] = { 26, 1 }, + [VCAP_IS1_HK_IP_MC] = { 27, 1 }, + [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1 }, + [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1 }, + [VCAP_IS1_HK_TPID] = { 30, 1 }, + [VCAP_IS1_HK_VID] = { 31, 12 }, + [VCAP_IS1_HK_DEI] = { 43, 1 }, + [VCAP_IS1_HK_PCP] = { 44, 3 }, + /* Specific Fields for IS1 Half Key S1_NORMAL */ + [VCAP_IS1_HK_L2_SMAC] = { 47, 48 }, + [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1 }, + [VCAP_IS1_HK_ETYPE] = { 96, 16 }, + [VCAP_IS1_HK_IP_SNAP] = { 112, 1 }, + [VCAP_IS1_HK_IP4] = { 113, 1 }, + /* Layer-3 Information */ + [VCAP_IS1_HK_L3_FRAGMENT] = { 114, 1 }, + [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = { 115, 1 }, + [VCAP_IS1_HK_L3_OPTIONS] = { 116, 1 }, + [VCAP_IS1_HK_L3_DSCP] = { 117, 6 }, + 
[VCAP_IS1_HK_L3_IP4_SIP] = { 123, 32 }, + /* Layer-4 Information */ + [VCAP_IS1_HK_TCP_UDP] = { 155, 1 }, + [VCAP_IS1_HK_TCP] = { 156, 1 }, + [VCAP_IS1_HK_L4_SPORT] = { 157, 16 }, + [VCAP_IS1_HK_L4_RNG] = { 173, 8 }, + /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */ + [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1 }, + [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12 }, + [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1 }, + [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3 }, + [VCAP_IS1_HK_IP4_IP4] = { 64, 1 }, + [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1 }, + [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1 }, + [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1 }, + [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6 }, + [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32 }, + [VCAP_IS1_HK_IP4_L3_IP4_SIP] = { 106, 32 }, + [VCAP_IS1_HK_IP4_L3_PROTO] = { 138, 8 }, + [VCAP_IS1_HK_IP4_TCP_UDP] = { 146, 1 }, + [VCAP_IS1_HK_IP4_TCP] = { 147, 1 }, + [VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 }, + [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is1_keys); + +const struct vcap_field vsc7514_vcap_is1_actions[] = { + [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 }, + [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 }, + [VCAP_IS1_ACT_QOS_ENA] = { 7, 1 }, + [VCAP_IS1_ACT_QOS_VAL] = { 8, 3 }, + [VCAP_IS1_ACT_DP_ENA] = { 11, 1 }, + [VCAP_IS1_ACT_DP_VAL] = { 12, 1 }, + [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8 }, + [VCAP_IS1_ACT_PAG_VAL] = { 21, 8 }, + [VCAP_IS1_ACT_RSV] = { 29, 9 }, + /* The fields below are incorrectly shifted by 2 in the manual */ + [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1 }, + [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12 }, + [VCAP_IS1_ACT_FID_SEL] = { 51, 2 }, + [VCAP_IS1_ACT_FID_VAL] = { 53, 13 }, + [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1 }, + [VCAP_IS1_ACT_PCP_VAL] = { 67, 3 }, + [VCAP_IS1_ACT_DEI_VAL] = { 70, 1 }, + [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1 }, + [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2 }, + [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 }, + [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is1_actions); + +const struct vcap_field vsc7514_vcap_is2_keys[] = { + /* Common: 46 bits */ + [VCAP_IS2_TYPE] = { 0, 4 }, + [VCAP_IS2_HK_FIRST] = { 4, 1 }, + [VCAP_IS2_HK_PAG] = { 5, 8 }, + [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12 }, + [VCAP_IS2_HK_RSV2] = { 25, 1 }, + [VCAP_IS2_HK_HOST_MATCH] = { 26, 1 }, + [VCAP_IS2_HK_L2_MC] = { 27, 1 }, + [VCAP_IS2_HK_L2_BC] = { 28, 1 }, + [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1 }, + [VCAP_IS2_HK_VID] = { 30, 12 }, + [VCAP_IS2_HK_DEI] = { 42, 1 }, + [VCAP_IS2_HK_PCP] = { 43, 3 }, + /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ + [VCAP_IS2_HK_L2_DMAC] = { 46, 48 }, + [VCAP_IS2_HK_L2_SMAC] = { 94, 48 }, + /* MAC_ETYPE (TYPE=000) */ + [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = { 142, 16 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = { 158, 16 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = { 174, 8 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = { 182, 3 }, + /* MAC_LLC (TYPE=001) */ + [VCAP_IS2_HK_MAC_LLC_L2_LLC] = { 142, 40 }, + /* MAC_SNAP (TYPE=010) */ + [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = { 142, 40 }, + /* MAC_ARP (TYPE=011) */ + [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48 }, + [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1 }, + [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1 }, + [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1 }, + [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1 }, + [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1 }, + [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1 }, + [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 100, 2 }, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 102, 32 }, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = { 134, 32 }, + 
[VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = { 166, 1 }, + /* IP4_TCP_UDP / IP4_OTHER common */ + [VCAP_IS2_HK_IP4] = { 46, 1 }, + [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1 }, + [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1 }, + [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1 }, + [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1 }, + [VCAP_IS2_HK_L3_TOS] = { 51, 8 }, + [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32 }, + [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32 }, + [VCAP_IS2_HK_DIP_EQ_SIP] = { 123, 1 }, + /* IP4_TCP_UDP (TYPE=100) */ + [VCAP_IS2_HK_TCP] = { 124, 1 }, + [VCAP_IS2_HK_L4_DPORT] = { 125, 16 }, + [VCAP_IS2_HK_L4_SPORT] = { 141, 16 }, + [VCAP_IS2_HK_L4_RNG] = { 157, 8 }, + [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = { 165, 1 }, + [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = { 166, 1 }, + [VCAP_IS2_HK_L4_FIN] = { 167, 1 }, + [VCAP_IS2_HK_L4_SYN] = { 168, 1 }, + [VCAP_IS2_HK_L4_RST] = { 169, 1 }, + [VCAP_IS2_HK_L4_PSH] = { 170, 1 }, + [VCAP_IS2_HK_L4_ACK] = { 171, 1 }, + [VCAP_IS2_HK_L4_URG] = { 172, 1 }, + [VCAP_IS2_HK_L4_1588_DOM] = { 173, 8 }, + [VCAP_IS2_HK_L4_1588_VER] = { 181, 4 }, + /* IP4_OTHER (TYPE=101) */ + [VCAP_IS2_HK_IP4_L3_PROTO] = { 124, 8 }, + [VCAP_IS2_HK_L3_PAYLOAD] = { 132, 56 }, + /* IP6_STD (TYPE=110) */ + [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1 }, + [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128 }, + [VCAP_IS2_HK_IP6_L3_PROTO] = { 175, 8 }, + /* OAM (TYPE=111) */ + [VCAP_IS2_HK_OAM_MEL_FLAGS] = { 142, 7 }, + [VCAP_IS2_HK_OAM_VER] = { 149, 5 }, + [VCAP_IS2_HK_OAM_OPCODE] = { 154, 8 }, + [VCAP_IS2_HK_OAM_FLAGS] = { 162, 8 }, + [VCAP_IS2_HK_OAM_MEPID] = { 170, 16 }, + [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 }, + [VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is2_keys); + +const struct vcap_field vsc7514_vcap_is2_actions[] = { + [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 }, + [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 }, + [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 }, + [VCAP_IS2_ACT_MASK_MODE] = { 5, 2 }, + [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1 }, + [VCAP_IS2_ACT_LRN_DIS] = { 8, 1 }, + [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1 }, + [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9 }, + [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1 }, + [VCAP_IS2_ACT_PORT_MASK] = { 20, 11 }, + [VCAP_IS2_ACT_REW_OP] = { 31, 9 }, + [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1 }, + [VCAP_IS2_ACT_RSV] = { 41, 2 }, + [VCAP_IS2_ACT_ACL_ID] = { 43, 6 }, + [VCAP_IS2_ACT_HIT_CNT] = { 49, 32 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is2_actions); diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index d74a80f010c5..3f371faeb6d0 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -114,6 +114,7 @@ static int sonic_probe1(struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); int err = -ENODEV; int i; + unsigned char addr[ETH_ALEN]; if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string)) return -EBUSY; @@ -143,9 +144,10 @@ static int sonic_probe1(struct net_device *dev) SONIC_WRITE(SONIC_CEP,0); for (i=0; i<3; i++) { val = SONIC_READ(SONIC_CAP0-i); - dev->dev_addr[i*2] = val; - dev->dev_addr[i*2+1] = val >> 8; + addr[i*2] = val; + addr[i*2+1] = val >> 8; } + eth_hw_addr_set(dev, addr); lp->dma_bitmode = SONIC_BITMODE32; diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index ca4686094701..52fef34d43f9 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -120,13 +120,14 @@ static const struct net_device_ops xtsonic_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; -static int __init 
sonic_probe1(struct net_device *dev) +static int sonic_probe1(struct net_device *dev) { unsigned int silicon_revision; struct sonic_local *lp = netdev_priv(dev); unsigned int base_addr = dev->base_addr; int i; int err = 0; + unsigned char addr[ETH_ALEN]; if (!request_mem_region(base_addr, 0x100, xtsonic_string)) return -EBUSY; @@ -163,9 +164,10 @@ static int __init sonic_probe1(struct net_device *dev) for (i=0; i<3; i++) { unsigned int val = SONIC_READ(SONIC_CAP0-i); - dev->dev_addr[i*2] = val; - dev->dev_addr[i*2+1] = val >> 8; + addr[i*2] = val; + addr[i*2+1] = val >> 8; } + eth_hw_addr_set(dev, addr); lp->dma_bitmode = SONIC_BITMODE32; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 1969009a91e7..2c2e9e56ed4e 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3159,10 +3159,6 @@ static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data) if (copy_from_user(&config, data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - /* Transmit HW Timestamp not supported */ switch (config.tx_type) { case HWTSTAMP_TX_OFF: diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index d7ac0307797f..34c0d2ddf9ef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) return -ENOMEM; cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (!cache) + if (!cache) { + nfp_cpp_area_free(area); return -ENOMEM; + } cache->id = 0; cache->addr = 0; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 71d234291fc5..1dc40c537281 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -210,9 +210,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ - return -EINVAL; - /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index f2cedbd9489c..ed1a84542ad2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2721,6 +2721,25 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); #define NUM_STORMS 6 /** + * qed_get_protocol_type_str(): Get a string for Protocol type. + * + * @protocol_type: Protocol type (using enum protocol_type). + * + * Return: String. + */ +const char *qed_get_protocol_type_str(u32 protocol_type); + +/** + * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID. + * + * @protocol_type: Protocol type (using enum protocol_type). + * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id). + * + * Return: String. + */ +const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id); + +/** * qed_set_rdma_error_level(): Sets the RDMA assert level. * If the severity of the error will be * above the level, the FW will assert. 
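The jazzsonic and xtsonic hunks above follow the netdev rule that dev->dev_addr may no longer be written directly: the probe routine assembles the MAC address in a local buffer and hands it to eth_hw_addr_set(). A minimal sketch of that pattern, with a hypothetical example_read_cam_word() standing in for the driver's register read:

#include <linux/etherdevice.h>

static void example_set_mac(struct net_device *dev)
{
	unsigned char addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN / 2; i++) {
		/* hypothetical helper: one 16-bit CAM word per iteration */
		u16 val = example_read_cam_word(i);

		addr[i * 2] = val;		/* low byte first, as in SONIC */
		addr[i * 2 + 1] = val >> 8;	/* then the high byte */
	}

	/* copies the buffer into the (now const) dev->dev_addr */
	eth_hw_addr_set(dev, addr);
}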
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 321c43408153..0ce37f2460a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -210,6 +210,82 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ XSTORM_PQ_INFO_OFFSET(pq_id)) +static const char * const s_protocol_types[] = { + "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE", + "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP", + "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON", + "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI", +}; + +static const char *s_ramrod_cmd_ids[][28] = { + { + "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC", + "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC", + "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN", + "ISCSI_RAMROD_CMD_ID_UPDATE_CONN", + "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN", + "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE", + "ISCSI_RAMROD_CMD_ID_CONN_STATS", }, + { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC", + "FCOE_RAMROD_CMD_ID_STAT_FUNC", + "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN", + "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", }, + { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", + "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", + "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", + "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", + "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", + "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", + "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP", + "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP", + "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP", + "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE", + "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP", + "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP", + "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP", + "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", }, + { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START", + "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP", + "CORE_RAMROD_TX_QUEUE_STOP", + "CORE_RAMROD_RX_QUEUE_FLUSH", + "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", }, + { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START", + "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP", + "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP", + "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP", + "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE", + "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION", + "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER", + "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER", + "ETH_RAMROD_RX_ADD_UDP_FILTER", + "ETH_RAMROD_RX_DELETE_UDP_FILTER", + "ETH_RAMROD_RX_CREATE_GFT_ACTION", + "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE", + "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL", + "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL", + "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", }, + { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", + "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", + "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", + "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", + "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", + "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", + "RDMA_RAMROD_STOP_NS_TRACKING", + "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD", + "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD", + 
"IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR", + "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP", + "IWARP_RAMROD_CMD_ID_MODIFY_QP", + "IWARP_RAMROD_CMD_ID_DESTROY_QP", + "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", }, + { NULL }, /*TOE*/ + { NULL }, /*PREROCE*/ + { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START", + "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START", + "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE", + "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", } +}; + /******************** INTERNAL IMPLEMENTATION *********************/ /* Returns the external VOQ number */ @@ -1647,6 +1723,32 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } +const char *qed_get_protocol_type_str(u32 protocol_type) +{ + if (protocol_type >= ARRAY_SIZE(s_protocol_types)) + return "Invalid protocol type"; + + return s_protocol_types[protocol_type]; +} + +const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id) +{ + const char *ramrod_cmd_id_str; + + if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids)) + return "Invalid protocol type"; + + if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0])) + return "Invalid Ramrod command ID"; + + ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id]; + + if (!ramrod_cmd_id_str) + return "Invalid Ramrod command ID"; + + return ramrod_cmd_id_str; +} + static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) { switch (storm_id) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 6958adeca86d..82e74f62b677 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -2399,3 +2399,25 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } + +int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info) +{ + u16 sbid = p_sb->igu_sb_id; + u32 i; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (sbid >= NUM_OF_SBS(p_hwfn->cdev)) + return -EINVAL; + + p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4); + p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4); + + for (i = 0; i < PIS_PER_SB; i++) + p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 84c17e97f569..7e5127f61744 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -186,6 +186,19 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); /** + * qed_int_get_sb_dbg: Read debug information regarding a given SB + * + * @p_hwfn: hw function pointer + * @p_ptt: ptt resource + * @p_sb: pointer to status block for which we want to get info + * @p_info: pointer to struct to fill with information regarding SB + * + * Return: 0 with status block info filled on success, otherwise return error + */ +int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info); + +/** * qed_db_rec_handler(): Doorbell Recovery handler. * Run doorbell recovery in case of PF overflow (and flush DORQ if * needed). 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 7673b3e07736..46d4207f22a3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -447,7 +447,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->wol_support = true; dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); - + dev_info->esl = qed_mcp_is_esl_supported(p_hwfn); dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, @@ -2936,6 +2936,30 @@ out: return status; } +static int +qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, + u16 qid, struct qed_sb_info_dbg *sb_dbg) +{ + struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; + struct qed_ptt *ptt; + int rc; + + if (IS_VF(cdev)) + return -EINVAL; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Can't acquire PTT\n"); + return -EAGAIN; + } + + memset(sb_dbg, 0, sizeof(*sb_dbg)); + rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); + + qed_ptt_release(hwfn, ptt); + return rc; +} + static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len) { @@ -2978,11 +3002,54 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) return rc; } +static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) +{ + char buf[QED_MFW_REPORT_STR_SIZE]; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + va_list vl; + + va_start(vl, fmt); + vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl); + va_end(vl); + + if (IS_PF(cdev)) { + p_hwfn = QED_LEADING_HWFN(cdev); + p_ptt = qed_ptt_acquire(p_hwfn); + if (p_ptt) { + qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf)); + qed_ptt_release(p_hwfn, p_ptt); + } + } +} + static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) { return QED_AFFIN_HWFN_IDX(cdev); } +static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + *esl_active = false; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, @@ -3038,6 +3105,9 @@ const struct qed_common_ops qed_common_ops_pass = { .read_nvm_cfg = &qed_nvm_flash_cfg_read, .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, .set_grc_config = &qed_set_grc_config, + .mfw_report = &qed_mfw_report, + .get_sb_info = &qed_get_sb_info, + .get_esl_status = &qed_get_esl_status, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 64678a256f3b..da1eadabcb41 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -4158,3 +4158,25 @@ qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, return qed_mcp_send_debug_data(p_hwfn, p_ptt, QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size); } + +bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK); +} + +int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, 
DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc); + return rc; + } + + *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 564723800d15..369e1892450a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -15,6 +15,8 @@ #include "qed_hsi.h" #include "qed_dev_api.h" +#define QED_MFW_REPORT_STR_SIZE 256 + struct qed_mcp_link_speed_params { bool autoneg; @@ -1337,4 +1339,24 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 len); + +/** + * qed_mcp_is_esl_supported(): Return whether management firmware support ESL or not. + * + * @p_hwfn: hw function pointer + * + * Return: true if esl is supported, otherwise return false + */ +bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_get_esl_status(): Get enhanced system lockdown status + * + * @p_hwfn: hw function pointer + * @p_ptt: ptt resource pointer + * @active: ESL active status data pointer + * + * Return: 0 with esl status info on success, otherwise return error + */ +int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h index 8a0e3c5d4bda..b70ee8200e15 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h @@ -1191,6 +1191,7 @@ enum drv_msg_code_enum { DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001), DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002), DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004), + DRV_MSG_CODE_GET_MANAGEMENT_STATUS = DRV_MSG_CODE(0xc007), }; #define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 6f1a52e6beb2..b5e35f433a20 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -550,6 +550,8 @@ 0x1 << 1) #define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \ 0x1 << 0) +#define IGU_REG_PRODUCER_MEMORY 0x182000UL +#define IGU_REG_CONSUMER_MEM 0x183000UL #define IGU_REG_MAPPING_MEMORY \ 0x184000UL #define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 648176dfb871..3b54da963554 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -85,10 +85,12 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, goto err; } - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n", - opaque_cid, cmd, protocol, - (unsigned long)&p_ent->ramrod, + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n", + opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd), + cmd, qed_get_protocol_type_str(protocol), protocol, + (unsigned long long)(uintptr_t)&p_ent->ramrod, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c
b/drivers/net/ethernet/qlogic/qed/qed_spq.c index e0473729b161..d01b9245f811 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -139,10 +139,13 @@ err: if (!p_ptt) return -EBUSY; qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL, - "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", + "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n", le32_to_cpu(p_ent->elem.hdr.cid), + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), p_ent->elem.hdr.cmd_id, - p_ent->elem.hdr.protocol_id, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id, le16_to_cpu(p_ent->elem.hdr.echo)); qed_ptt_release(p_hwfn, p_ptt); @@ -170,13 +173,16 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, return -EINVAL; } - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n", + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n", p_ent->elem.hdr.cid, + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), p_ent->elem.hdr.cmd_id, - p_ent->elem.hdr.protocol_id, - p_ent->elem.data_ptr.hi, - p_ent->elem.data_ptr.lo, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id, + p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); @@ -271,17 +277,27 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, { qed_spq_async_comp_cb cb; - if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) + if (!p_hwfn->p_spq) return -EINVAL; + if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) { + DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n", + qed_get_protocol_type_str(p_eqe->protocol_id), + p_eqe->protocol_id); + + return -EINVAL; + } + cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; if (cb) { return cb(p_hwfn, p_eqe->opcode, p_eqe->echo, &p_eqe->data, p_eqe->fw_return_code); } else { DP_NOTICE(p_hwfn, - "Unknown Async completion for protocol: %d\n", + "Unknown Async completion for %s:%d\n", + qed_get_protocol_type_str(p_eqe->protocol_id), p_eqe->protocol_id); + return -EINVAL; } } @@ -830,8 +846,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->recov_in_prog) { DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n", - p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id); + "Recovery is in progress. 
Skip spq post [%s:%02x %s:%02x]\n", + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), + p_ent->elem.hdr.cmd_id, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id); /* Let the flow complete w/o any error handling */ qed_spq_recov_set_ret_code(p_ent, fw_return_code); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 100c9c52c20b..97a7ab0826ed 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -168,6 +168,8 @@ enum { QEDE_PRI_FLAG_CMT, QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */ QEDE_PRI_FLAG_RECOVER_ON_ERROR, + QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */ + QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */ QEDE_PRI_FLAG_LEN, }; @@ -175,6 +177,8 @@ static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Coupled-Function", "SmartAN capable", "Recover on error", + "ESL capable", + "ESL active", }; enum qede_ethtool_tests { @@ -478,6 +482,7 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) static u32 qede_get_priv_flags(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); + bool esl_active; u32 flags = 0; if (edev->dev_info.common.num_hwfns > 1) @@ -489,6 +494,14 @@ static u32 qede_get_priv_flags(struct net_device *dev) if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE)) flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR); + if (edev->dev_info.common.esl) + flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT); + + edev->ops->common->get_esl_status(edev->cdev, &esl_active); + + if (esl_active) + flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE); + return flags; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index e113fbd56e86..5ea9cb4311a1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1644,6 +1644,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) data_split = true; } } else { + if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) { + DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len); + qede_free_failed_tx_pkt(txq, first_bd, 0, false); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 06c6a5813606..b4e5a15e308b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -509,34 +509,95 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; } -static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq) +static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp) { + char *p_sb = (char *)fp->sb_info->sb_virt; + u32 sb_size, i; + + sb_size = sizeof(struct status_block); + + for (i = 0; i < sb_size; i += 8) + DP_NOTICE(edev, + "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n", + p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3], + p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]); +} + +static void +qede_txq_fp_log_metadata(struct qede_dev *edev, + struct qede_fastpath *fp, struct qede_tx_queue *txq) +{ + struct qed_chain *p_chain = &txq->tx_pbl; + + /* Dump txq/fp/sb ids etc. 
other metadata */ DP_NOTICE(edev, - "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n", - txq->index, le16_to_cpu(*txq->hw_cons_ptr), - qed_chain_get_cons_idx(&txq->tx_pbl), - qed_chain_get_prod_idx(&txq->tx_pbl), - jiffies); + "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n", + fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos, + p_chain, p_chain->capacity, p_chain->size, jiffies, HZ); + + /* Dump all the relevant prod/cons indexes */ + DP_NOTICE(edev, + "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n", + le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons, + qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain)); +} + +static void +qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq) +{ + struct qed_sb_info_dbg sb_dbg; + int rc; + + /* sb info */ + qede_fp_sb_dump(edev, fp); + + memset(&sb_dbg, 0, sizeof(sb_dbg)); + rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg); + + DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n", + sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]); + + /* report to mfw */ + edev->ops->common->mfw_report(edev->cdev, + "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n", + txq->index, le16_to_cpu(*txq->hw_cons_ptr), + qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl), jiffies); + if (!rc) + edev->ops->common->mfw_report(edev->cdev, + "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n", + txq->index, fp->sb_info->igu_sb_id, + sb_dbg.igu_prod, sb_dbg.igu_cons, + sb_dbg.pi[TX_PI(txq->cos)]); } static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct qede_dev *edev = netdev_priv(dev); - struct qede_tx_queue *txq; - int cos; + int i; netif_carrier_off(dev); DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue); - if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX)) - return; + for_each_queue(i) { + struct qede_tx_queue *txq; + struct qede_fastpath *fp; + int cos; - for_each_cos_in_txq(edev, cos) { - txq = &edev->fp_array[txqueue].txq[cos]; + fp = &edev->fp_array[i]; + if (!(fp->type & QEDE_FASTPATH_TX)) + continue; - if (qed_chain_get_cons_idx(&txq->tx_pbl) != - qed_chain_get_prod_idx(&txq->tx_pbl)) - qede_tx_log_print(edev, txq); + for_each_cos_in_txq(edev, cos) { + txq = &fp->txq[cos]; + + /* Dump basic metadata for all queues */ + qede_txq_fp_log_metadata(edev, fp, txq); + + if (qed_chain_get_cons_idx(&txq->tx_pbl) != + qed_chain_get_prod_idx(&txq->tx_pbl)) + qede_tx_log_print(edev, fp, txq); + } } if (IS_VF(edev)) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 8c28fabb0ff6..39176e765767 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -304,11 +304,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n", config.tx_type, config.rx_filter); - if (config.flags) { - DP_ERR(edev, "config.flags is reserved for future use\n"); - return -EINVAL; - } - ptp->hw_ts_ioctl_called = 1; ptp->tx_type = config.tx_type; ptp->rx_filter = config.rx_filter; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 1e6d72adfe43..71523d747e93 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ 
b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev) spin_lock_irqsave(&qdev->hw_lock, hw_flags); - err = ql_wait_for_drvr_lock(qdev); - if (err) { - err = ql_adapter_initialize(qdev); - if (err) { - netdev_err(ndev, "Unable to initialize adapter\n"); - goto err_init; - } - netdev_err(ndev, "Releasing driver lock\n"); - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); - } else { + if (!ql_wait_for_drvr_lock(qdev)) { netdev_err(ndev, "Could not acquire driver lock\n"); + err = -ENODEV; goto err_lock; } + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); + goto err_init; + } + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); set_bit(QL_ADAPTER_UP, &qdev->flags); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index d51bac7ba5af..bd0607680329 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) sds_mbx_size = sizeof(struct qlcnic_sds_mbx); context_id = recv_ctx->context_id; num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; - ahw->hw_ops->alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_ADD_RCV_RINGS); + err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_ADD_RCV_RINGS); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to alloc mbx args %d\n", err); + return err; + } + cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); /* set up status rings, mbx 2-81 */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ce09bd45527e..b215cde68e10 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2221,10 +2221,6 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) if (copy_from_user(&config, req->ifr_data, sizeof(config))) return -EFAULT; - /* Reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: tstamp_tx_ctrl = 0; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 223626290ce0..d947a628e166 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3368,8 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* MDIO bus init */ ret = sh_mdio_init(mdp, pd); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "MDIO init failed: %d\n", ret); + dev_err_probe(&pdev->dev, ret, "MDIO init failed\n"); goto out_release; } diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 797e51802ccb..f0ef515e2ade 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1765,9 +1765,6 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) { int rc; - if (init->flags) - return -EINVAL; - if ((init->tx_type != HWTSTAMP_TX_OFF) && (init->tx_type != HWTSTAMP_TX_ON)) return -ERANGE; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 9160f9ed363a..6b5d96bced47 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -317,6 +317,7 @@ enum tx_frame_status { tx_not_ls = 0x1, tx_err = 0x2, tx_dma_own = 0x4, + tx_err_bump_tc = 0x8, }; enum dma_irq_status 
{ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index cbf4429fb1d2..d3b4765c1a5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -32,6 +32,8 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, return tx_not_ls; if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) { + ret = tx_err; + if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT)) x->tx_jabber++; if (unlikely(tdes3 & TDES3_PACKET_FLUSHED)) @@ -53,16 +55,16 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL)) x->tx_deferred++; - if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) + if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) { x->tx_underflow++; + ret |= tx_err_bump_tc; + } if (unlikely(tdes3 & TDES3_IP_HDR_ERROR)) x->tx_ip_header_error++; if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR)) x->tx_payload_error++; - - ret = tx_err; } if (unlikely(tdes3 & TDES3_DEFERRED)) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index cc1075c08996..c26ac288f981 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -132,6 +132,8 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, + u32 rxmode, u32 chan); #ifdef CONFIG_DEBUG_FS static const struct net_device_ops stmmac_netdev_ops; @@ -636,10 +638,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", __func__, config.flags, config.tx_type, config.rx_filter); - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -2466,6 +2464,21 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) return !!budget && work_done; } +static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) +{ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { + tc += 64; + + if (priv->plat->force_thresh_dma_mode) + stmmac_set_dma_operation_mode(priv, tc, tc, chan); + else + stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, + chan); + + priv->xstats.threshold = tc; + } +} + /** * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure @@ -2531,6 +2544,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* ... 
verify the status error condition */ if (unlikely(status & tx_err)) { priv->dev->stats.tx_errors++; + if (unlikely(status & tx_err_bump_tc)) + stmmac_bump_dma_threshold(priv, queue); } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; @@ -2781,21 +2796,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && - (tc <= 256)) { - tc += 64; - if (priv->plat->force_thresh_dma_mode) - stmmac_set_dma_operation_mode(priv, - tc, - tc, - chan); - else - stmmac_set_dma_operation_mode(priv, - tc, - SF_DMA_MODE, - chan); - priv->xstats.threshold = tc; - } + stmmac_bump_dma_threshold(priv, chan); } else if (unlikely(status[chan] == tx_hard_error)) { stmmac_tx_err(priv, chan); } @@ -5532,8 +5533,6 @@ static int stmmac_set_features(struct net_device *netdev, netdev_features_t features) { struct stmmac_priv *priv = netdev_priv(netdev); - bool sph_en; - u32 chan; /* Keep the COE Type in case of csum is supporting */ if (features & NETIF_F_RXCSUM) @@ -5545,10 +5544,13 @@ static int stmmac_set_features(struct net_device *netdev, */ stmmac_rx_ipc(priv, priv->hw); - sph_en = (priv->hw->rx_csum > 0) && priv->sph; + if (priv->sph_cap) { + bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; + u32 chan; - for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) - stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } return 0; } @@ -5744,21 +5746,7 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) if (unlikely(status & tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && - tc <= 256) { - tc += 64; - if (priv->plat->force_thresh_dma_mode) - stmmac_set_dma_operation_mode(priv, - tc, - tc, - chan); - else - stmmac_set_dma_operation_mode(priv, - tc, - SF_DMA_MODE, - chan); - priv->xstats.threshold = tc; - } + stmmac_bump_dma_threshold(priv, chan); } else if (unlikely(status == tx_hard_error)) { stmmac_tx_err(priv, chan); } diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index c99dd9735087..8624a044776f 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -626,10 +626,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 33c1592d5381..751fb0bc65c5 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2654,10 +2654,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - switch (cfg.tx_type) { case HWTSTAMP_TX_OFF: gbe_dev->tx_ts_enabled = 0; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index f50f9a43d3ea..f47b8358669d 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ 
b/drivers/net/ethernet/toshiba/spider_net.c @@ -595,24 +595,24 @@ spider_net_set_multi(struct net_device *netdev) int i; u32 reg; struct spider_net_card *card = netdev_priv(netdev); - DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {}; + DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES); spider_net_set_promisc(card); if (netdev->flags & IFF_ALLMULTI) { - for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) { - set_bit(i, bitmask); - } + bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES); goto write_hash; } + bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES); + /* well, we know, what the broadcast hash value is: it's xfd hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ - set_bit(0xfd, bitmask); + __set_bit(0xfd, bitmask); netdev_for_each_mc_addr(ha, netdev) { hash = spider_net_get_multicast_hash(netdev, ha->addr); - set_bit(hash, bitmask); + __set_bit(hash, bitmask); } write_hash: diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig new file mode 100644 index 000000000000..4184a635fe01 --- /dev/null +++ b/drivers/net/ethernet/vertexcom/Kconfig @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Vertexcom network device configuration +# + +config NET_VENDOR_VERTEXCOM + bool "Vertexcom devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Vertexcom cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_VERTEXCOM + +config MSE102X + tristate "Vertexcom MSE102x SPI" + depends on SPI + help + SPI driver for Vertexcom MSE102x SPI attached network chip. + +endif # NET_VENDOR_VERTEXCOM diff --git a/drivers/net/ethernet/vertexcom/Makefile b/drivers/net/ethernet/vertexcom/Makefile new file mode 100644 index 000000000000..f8b12e312637 --- /dev/null +++ b/drivers/net/ethernet/vertexcom/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Vertexcom network device drivers. 
+# + +obj-$(CONFIG_MSE102X) += mse102x.o diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c new file mode 100644 index 000000000000..a3c2426e5597 --- /dev/null +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -0,0 +1,769 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 in-tech smart charging GmbH + * + * driver is based on micrel/ks8851_spi.c + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/cache.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#include <linux/spi/spi.h> +#include <linux/of_net.h> + +#define MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER) + +#define DRV_NAME "mse102x" + +#define DET_CMD 0x0001 +#define DET_SOF 0x0002 +#define DET_DFT 0x55AA + +#define CMD_SHIFT 12 +#define CMD_RTS (0x1 << CMD_SHIFT) +#define CMD_CTR (0x2 << CMD_SHIFT) + +#define CMD_MASK GENMASK(15, CMD_SHIFT) +#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0) + +#define DET_CMD_LEN 4 +#define DET_SOF_LEN 2 +#define DET_DFT_LEN 2 + +#define MIN_FREQ_HZ 6000000 +#define MAX_FREQ_HZ 7142857 + +struct mse102x_stats { + u64 xfer_err; + u64 invalid_cmd; + u64 invalid_ctr; + u64 invalid_dft; + u64 invalid_len; + u64 invalid_rts; + u64 invalid_sof; + u64 tx_timeout; +}; + +static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = { + "SPI transfer errors", + "Invalid command", + "Invalid CTR", + "Invalid DFT", + "Invalid frame length", + "Invalid RTS", + "Invalid SOF", + "TX timeout", +}; + +struct mse102x_net { + struct net_device *ndev; + + u8 rxd[8]; + u8 txd[8]; + + u32 msg_enable ____cacheline_aligned; + + struct sk_buff_head txq; + struct mse102x_stats stats; +}; + +struct mse102x_net_spi { + struct mse102x_net mse102x; + struct mutex lock; /* Protect SPI frame transfer */ + struct work_struct tx_work; + struct spi_device *spidev; + struct spi_message spi_msg; + struct spi_transfer spi_xfer; + +#ifdef CONFIG_DEBUG_FS + struct dentry *device_root; +#endif +}; + +#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x) + +#ifdef CONFIG_DEBUG_FS + +static int mse102x_info_show(struct seq_file *s, void *what) +{ + struct mse102x_net_spi *mses = s->private; + + seq_printf(s, "TX ring size : %u\n", + skb_queue_len(&mses->mse102x.txq)); + + seq_printf(s, "IRQ : %d\n", + mses->spidev->irq); + + seq_printf(s, "SPI effective speed : %lu\n", + (unsigned long)mses->spi_xfer.effective_speed_hz); + seq_printf(s, "SPI mode : %x\n", + mses->spidev->mode); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(mse102x_info); + +static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses) +{ + mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev), + NULL); + + debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses, + &mse102x_info_fops); +} + +static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses) +{ + debugfs_remove_recursive(mses->device_root); +} + +#else /* CONFIG_DEBUG_FS */ + +static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses) +{ +} + +static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses) +{ +} + +#endif + +/* SPI register read/write calls. + * + * All these calls issue SPI transactions to access the chip's registers. 
They + * all require that the necessary lock is held to prevent accesses when the + * chip is busy transferring packet data. + */ + +static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + __be16 txb[2]; + int ret; + + txb[0] = cpu_to_be16(DET_CMD); + txb[1] = cpu_to_be16(cmd); + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = DET_CMD_LEN; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } +} + +static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + __be16 *txb = (__be16 *)mse->txd; + __be16 *cmd = (__be16 *)mse->rxd; + u8 *trx = mse->rxd; + int ret; + + txb[0] = 0; + txb[1] = 0; + + xfer->tx_buf = txb; + xfer->rx_buf = trx; + xfer->len = DET_CMD_LEN; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } else if (*cmd != cpu_to_be16(DET_CMD)) { + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, *cmd); + mse->stats.invalid_cmd++; + ret = -EIO; + } else { + memcpy(rxb, trx + 2, 2); + } + + return ret; +} + +static inline void mse102x_push_header(struct sk_buff *skb) +{ + __be16 *header = skb_push(skb, DET_SOF_LEN); + + *header = cpu_to_be16(DET_SOF); +} + +static inline void mse102x_put_footer(struct sk_buff *skb) +{ + __be16 *footer = skb_put(skb, DET_DFT_LEN); + + *footer = cpu_to_be16(DET_DFT); +} + +static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, + unsigned int pad) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + struct sk_buff *tskb; + int ret; + + netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n", + __func__, txp, txp->len, txp->data); + + if ((skb_headroom(txp) < DET_SOF_LEN) || + (skb_tailroom(txp) < DET_DFT_LEN + pad)) { + tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad, + GFP_KERNEL); + if (!tskb) + return -ENOMEM; + + dev_kfree_skb(txp); + txp = tskb; + } + + mse102x_push_header(txp); + + if (pad) + skb_put_zero(txp, pad); + + mse102x_put_footer(txp); + + xfer->tx_buf = txp->data; + xfer->rx_buf = NULL; + xfer->len = txp->len; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } + + return ret; +} + +static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, + unsigned int frame_len) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + __be16 *sof = (__be16 *)buff; + __be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len); + int ret; + + xfer->rx_buf = buff; + xfer->tx_buf = NULL; + xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } else if (*sof != cpu_to_be16(DET_SOF)) { + netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n", + __func__, *sof); + mse->stats.invalid_sof++; + ret = -EIO; + } else if 
(*dft != cpu_to_be16(DET_DFT)) { + netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n", + __func__, *dft); + mse->stats.invalid_dft++; + ret = -EIO; + } + + return ret; +} + +static void mse102x_dump_packet(const char *msg, int len, const char *data) +{ + printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len); + print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1, + data, len, true); +} + +static void mse102x_rx_pkt_spi(struct mse102x_net *mse) +{ + struct sk_buff *skb; + unsigned int rxalign; + unsigned int rxlen; + __be16 rx = 0; + u16 cmd_resp; + u8 *rxpkt; + int ret; + + mse102x_tx_cmd_spi(mse, CMD_CTR); + ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); + cmd_resp = be16_to_cpu(rx); + + if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) { + usleep_range(50, 100); + + mse102x_tx_cmd_spi(mse, CMD_CTR); + ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); + if (ret) + return; + + cmd_resp = be16_to_cpu(rx); + if ((cmd_resp & CMD_MASK) != CMD_RTS) { + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, cmd_resp); + mse->stats.invalid_rts++; + return; + } + + net_dbg_ratelimited("%s: Unexpected response to first CMD\n", + __func__); + } + + rxlen = cmd_resp & LEN_MASK; + if (!rxlen) { + net_dbg_ratelimited("%s: No frame length defined\n", __func__); + mse->stats.invalid_len++; + return; + } + + rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); + skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); + if (!skb) + return; + + /* 2 bytes Start of frame (before ethernet header) + * 2 bytes Data frame tail (after ethernet frame) + * They are copied, but ignored. + */ + rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN; + if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) { + mse->ndev->stats.rx_errors++; + dev_kfree_skb(skb); + return; + } + + if (netif_msg_pktdata(mse)) + mse102x_dump_packet(__func__, skb->len, skb->data); + + skb->protocol = eth_type_trans(skb, mse->ndev); + netif_rx_ni(skb); + + mse->ndev->stats.rx_packets++; + mse->ndev->stats.rx_bytes += rxlen; +} + +static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb, + unsigned long work_timeout) +{ + unsigned int pad = 0; + __be16 rx = 0; + u16 cmd_resp; + int ret; + bool first = true; + + if (txb->len < 60) + pad = 60 - txb->len; + + while (1) { + mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad)); + ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); + cmd_resp = be16_to_cpu(rx); + + if (!ret) { + /* ready to send frame ? */ + if (cmd_resp == CMD_CTR) + break; + + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, cmd_resp); + mse->stats.invalid_ctr++; + } + + /* It's not predictable how long / many retries it takes to + * send at least one packet, so TX timeouts are possible. + * That's the reason why the netdev watchdog is not used here. 
+ */ + if (time_after(jiffies, work_timeout)) + return -ETIMEDOUT; + + if (first) { + /* throttle at first issue */ + netif_stop_queue(mse->ndev); + /* fast retry */ + usleep_range(50, 100); + first = false; + } else { + msleep(20); + } + } + + ret = mse102x_tx_frame_spi(mse, txb, pad); + if (ret) + net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n", + __func__, ret); + + return ret; +} + +#define TX_QUEUE_MAX 10 + +static void mse102x_tx_work(struct work_struct *work) +{ + /* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */ + unsigned long work_timeout = jiffies + msecs_to_jiffies(1000); + struct mse102x_net_spi *mses; + struct mse102x_net *mse; + struct sk_buff *txb; + int ret = 0; + + mses = container_of(work, struct mse102x_net_spi, tx_work); + mse = &mses->mse102x; + + while ((txb = skb_dequeue(&mse->txq))) { + mutex_lock(&mses->lock); + ret = mse102x_tx_pkt_spi(mse, txb, work_timeout); + mutex_unlock(&mses->lock); + if (ret) { + mse->ndev->stats.tx_dropped++; + } else { + mse->ndev->stats.tx_bytes += txb->len; + mse->ndev->stats.tx_packets++; + } + + dev_kfree_skb(txb); + } + + if (ret == -ETIMEDOUT) { + if (netif_msg_timer(mse)) + netdev_err(mse->ndev, "tx work timeout\n"); + + mse->stats.tx_timeout++; + } + + netif_wake_queue(mse->ndev); +} + +static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + netif_dbg(mse, tx_queued, ndev, + "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); + + skb_queue_tail(&mse->txq, skb); + + if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX) + netif_stop_queue(ndev); + + schedule_work(&mses->tx_work); + + return NETDEV_TX_OK; +} + +static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np) +{ + struct net_device *ndev = mse->ndev; + int ret = of_get_ethdev_address(np, ndev); + + if (ret) { + eth_hw_addr_random(ndev); + netdev_err(ndev, "Using random MAC address: %pM\n", + ndev->dev_addr); + } +} + +/* Assumption: this is called for every incoming packet */ +static irqreturn_t mse102x_irq(int irq, void *_mse) +{ + struct mse102x_net *mse = _mse; + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + mutex_lock(&mses->lock); + mse102x_rx_pkt_spi(mse); + mutex_unlock(&mses->lock); + + return IRQ_HANDLED; +} + +static int mse102x_net_open(struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + int ret; + + ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, + ndev->name, mse); + if (ret < 0) { + netdev_err(ndev, "Failed to get irq: %d\n", ret); + return ret; + } + + netif_dbg(mse, ifup, ndev, "opening\n"); + + netif_start_queue(ndev); + + netif_carrier_on(ndev); + + netif_dbg(mse, ifup, ndev, "network device up\n"); + + return 0; +} + +static int mse102x_net_stop(struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + netif_info(mse, ifdown, ndev, "shutting down\n"); + + netif_carrier_off(mse->ndev); + + /* stop any outstanding work */ + flush_work(&mses->tx_work); + + netif_stop_queue(ndev); + + skb_queue_purge(&mse->txq); + + free_irq(ndev->irq, mse); + + return 0; +} + +static const struct net_device_ops mse102x_netdev_ops = { + .ndo_open = mse102x_net_open, + .ndo_stop = mse102x_net_stop, + .ndo_start_xmit = mse102x_start_xmit_spi, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/* ethtool support */ + 
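/*
 * Illustrative sketch -- not part of the patch above. The mse102x TX path
 * just shown follows a common pattern for NICs behind a sleeping bus:
 * ndo_start_xmit() runs in atomic context while spi_sync() may sleep, so
 * the xmit callback only enqueues the skb and defers the actual transfer
 * to a work item, applying back-pressure via netif_stop_queue(). A minimal
 * sketch of that pattern; all demo_* names are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

#define DEMO_TX_QUEUE_MAX	10	/* back-pressure threshold */

struct demo_priv {
	struct net_device *ndev;
	struct sk_buff_head txq;	/* frames waiting for the bus */
	struct work_struct tx_work;	/* drains txq in process context */
};

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct demo_priv *priv = netdev_priv(ndev);

	skb_queue_tail(&priv->txq, skb);

	/* Stop the stack before the backlog can grow without bound */
	if (skb_queue_len(&priv->txq) >= DEMO_TX_QUEUE_MAX)
		netif_stop_queue(ndev);

	schedule_work(&priv->tx_work);

	return NETDEV_TX_OK;
}

static void demo_tx_work(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, tx_work);
	struct sk_buff *skb;

	/* Process context: sleeping bus transfers are allowed here */
	while ((skb = skb_dequeue(&priv->txq))) {
		/* ...issue the (possibly sleeping) bus transfer... */
		dev_kfree_skb(skb);
	}

	netif_wake_queue(priv->ndev);
}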
+static void mse102x_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *di) +{ + strscpy(di->driver, DRV_NAME, sizeof(di->driver)); + strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info)); +} + +static u32 mse102x_get_msglevel(struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + + return mse->msg_enable; +} + +static void mse102x_set_msglevel(struct net_device *ndev, u32 to) +{ + struct mse102x_net *mse = netdev_priv(ndev); + + mse->msg_enable = to; +} + +static void mse102x_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *estats, u64 *data) +{ + struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_stats *st = &mse->stats; + + memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64)); +} + +static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &mse102x_gstrings_stats, + sizeof(mse102x_gstrings_stats)); + break; + default: + WARN_ON(1); + break; + } +} + +static int mse102x_get_sset_count(struct net_device *ndev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(mse102x_gstrings_stats); + default: + return -EINVAL; + } +} + +static const struct ethtool_ops mse102x_ethtool_ops = { + .get_drvinfo = mse102x_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = mse102x_get_msglevel, + .set_msglevel = mse102x_set_msglevel, + .get_ethtool_stats = mse102x_get_ethtool_stats, + .get_strings = mse102x_get_strings, + .get_sset_count = mse102x_get_sset_count, +}; + +/* driver bus management functions */ + +#ifdef CONFIG_PM_SLEEP + +static int mse102x_suspend(struct device *dev) +{ + struct mse102x_net *mse = dev_get_drvdata(dev); + struct net_device *ndev = mse->ndev; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + mse102x_net_stop(ndev); + } + + return 0; +} + +static int mse102x_resume(struct device *dev) +{ + struct mse102x_net *mse = dev_get_drvdata(dev); + struct net_device *ndev = mse->ndev; + + if (netif_running(ndev)) { + mse102x_net_open(ndev); + netif_device_attach(ndev); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume); + +static int mse102x_probe_spi(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct mse102x_net_spi *mses; + struct net_device *ndev; + struct mse102x_net *mse; + int ret; + + spi->bits_per_word = 8; + spi->mode |= SPI_MODE_3; + /* enforce minimum speed to ensure device functionality */ + spi->master->min_speed_hz = MIN_FREQ_HZ; + + if (!spi->max_speed_hz) + spi->max_speed_hz = MAX_FREQ_HZ; + + if (spi->max_speed_hz < MIN_FREQ_HZ || + spi->max_speed_hz > MAX_FREQ_HZ) { + dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n", + MIN_FREQ_HZ, MAX_FREQ_HZ); + return -EINVAL; + } + + ret = spi_setup(spi); + if (ret < 0) { + dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret); + return ret; + } + + ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi)); + if (!ndev) + return -ENOMEM; + + ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4); + ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4); + ndev->priv_flags &= ~IFF_TX_SKB_SHARING; + ndev->tx_queue_len = 100; + + mse = netdev_priv(ndev); + mses = to_mse102x_spi(mse); + + mses->spidev = spi; + mutex_init(&mses->lock); + INIT_WORK(&mses->tx_work, mse102x_tx_work); + + /* initialise pre-made spi transfer messages */ + spi_message_init(&mses->spi_msg); + spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg); + 
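	/*
	 * Note (editorial): the single spi_transfer/spi_message pair set up
	 * above is reused for every transaction; mse102x_tx_cmd_spi() and
	 * friends merely repoint tx_buf/rx_buf and len before spi_sync(),
	 * and callers serialize access with mses->lock, so no
	 * per-transaction message allocation is needed.
	 */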
+ ndev->irq = spi->irq; + mse->ndev = ndev; + + /* set the default message enable */ + mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT); + + skb_queue_head_init(&mse->txq); + + SET_NETDEV_DEV(ndev, dev); + + dev_set_drvdata(dev, mse); + + netif_carrier_off(mse->ndev); + ndev->netdev_ops = &mse102x_netdev_ops; + ndev->ethtool_ops = &mse102x_ethtool_ops; + + mse102x_init_mac(mse, dev->of_node); + + ret = register_netdev(ndev); + if (ret) { + dev_err(dev, "failed to register network device: %d\n", ret); + return ret; + } + + mse102x_init_device_debugfs(mses); + + return 0; +} + +static int mse102x_remove_spi(struct spi_device *spi) +{ + struct mse102x_net *mse = dev_get_drvdata(&spi->dev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + if (netif_msg_drv(mse)) + dev_info(&spi->dev, "remove\n"); + + mse102x_remove_device_debugfs(mses); + unregister_netdev(mse->ndev); + + return 0; +} + +static const struct of_device_id mse102x_match_table[] = { + { .compatible = "vertexcom,mse1021" }, + { .compatible = "vertexcom,mse1022" }, + { } +}; +MODULE_DEVICE_TABLE(of, mse102x_match_table); + +static struct spi_driver mse102x_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = mse102x_match_table, + .pm = &mse102x_pm_ops, + }, + .probe = mse102x_probe_spi, + .remove = mse102x_remove_spi, +}; +module_spi_driver(mse102x_driver); + +MODULE_DESCRIPTION("MSE102x Network driver"); +MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index e39356364f33..23ac353b35fe 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2051,6 +2051,7 @@ static int axienet_probe(struct platform_device *pdev) lp->phylink_config.dev = &ndev->dev; lp->phylink_config.type = PHYLINK_NETDEV; + lp->phylink_config.legacy_pre_march2020 = true; lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10FD | MAC_100FD | MAC_1000FD; diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 65fdad1107fc..df77a22d1b81 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -382,9 +382,6 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ - return -EINVAL; - ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); if (ret) return ret; diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c index e62ab9c3ac67..2da2c4194f2e 100644 --- a/drivers/net/ipa/ipa_data-v4.5.c +++ b/drivers/net/ipa/ipa_data-v4.5.c @@ -420,15 +420,10 @@ static const struct ipa_mem_data ipa_mem_data = { /* Interconnect rates are in 1000 byte/second units */ static const struct ipa_interconnect_data ipa_interconnect_data[] = { { - .name = "memory-a", + .name = "memory", .peak_bandwidth = 600000, /* 600 MBps */ .average_bandwidth = 150000, /* 150 MBps */ }, - { - .name = "memory-b", - .peak_bandwidth = 1804000, /* 1.804 GBps */ - .average_bandwidth = 150000, /* 150 MBps */ - }, /* Average rate is unused for the next two interconnects */ { .name = "imem", diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 6cd50106e611..c613900c3811 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ 
b/drivers/net/ipvlan/ipvlan_core.c @@ -291,8 +291,7 @@ void ipvlan_process_multicast(struct work_struct *work) else kfree_skb(skb); } - if (dev) - dev_put(dev); + dev_put(dev); cond_resched(); } } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 20da0b2bd246..696e245f6d00 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -100,8 +100,7 @@ static void ipvlan_port_destroy(struct net_device *dev) netdev_rx_handler_unregister(dev); cancel_work_sync(&port->wq); while ((skb = __skb_dequeue(&port->backlog)) != NULL) { - if (skb->dev) - dev_put(skb->dev); + dev_put(skb->dev); kfree_skb(skb); } ida_destroy(&port->ida); diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index 6da1fcb25847..bfa16826a6e1 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -141,7 +141,7 @@ config MDIO_MVUSB config MDIO_MSCC_MIIM tristate "Microsemi MIIM interface support" - depends on HAS_IOMEM + depends on HAS_IOMEM && REGMAP_MMIO select MDIO_DEVRES help This driver supports the MIIM (MDIO) interface found in the network diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ccecba908ded..ab8cd5551020 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -721,7 +721,7 @@ restart: __netpoll_cleanup(&nt->np); spin_lock_irqsave(&target_list_lock, flags); - dev_put(nt->np.dev); + dev_put_track(nt->np.dev, &nt->np.dev_tracker); nt->np.dev = NULL; nt->enabled = false; stopped = true; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 705c16675b80..c2d1a85ec559 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1235,9 +1235,6 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ - return -EINVAL; - if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC) return -ERANGE; diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index edb951695b13..34f829845d06 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1057,9 +1057,6 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) - return -EINVAL; - switch (cfg.tx_type) { case HWTSTAMP_TX_ONESTEP_SYNC: one_step = true; diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 2870c33b8975..271fc01f7f7f 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -162,11 +162,11 @@ static const struct phy_setting settings[] = { PHY_SETTING( 2500, FULL, 2500baseT_Full ), PHY_SETTING( 2500, FULL, 2500baseX_Full ), /* 1G */ - PHY_SETTING( 1000, FULL, 1000baseKX_Full ), PHY_SETTING( 1000, FULL, 1000baseT_Full ), PHY_SETTING( 1000, HALF, 1000baseT_Half ), PHY_SETTING( 1000, FULL, 1000baseT1_Full ), PHY_SETTING( 1000, FULL, 1000baseX_Full ), + PHY_SETTING( 1000, FULL, 1000baseKX_Full ), /* 100M */ PHY_SETTING( 100, FULL, 100baseT_Full ), PHY_SETTING( 100, FULL, 100baseT1_Full ), diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index eacbb0e6a24b..20df8af3e201 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -742,7 +742,7 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl) phylink_autoneg_inband(pl->cur_link_an_mode)) { if (pl->pcs_ops) pl->pcs_ops->pcs_an_restart(pl->pcs); - else + else if 
(pl->config->legacy_pre_march2020) pl->mac_ops->mac_an_restart(pl->config); } } @@ -803,7 +803,7 @@ static int phylink_change_inband_advert(struct phylink *pl) if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) return 0; - if (!pl->pcs_ops) { + if (!pl->pcs_ops && pl->config->legacy_pre_march2020) { /* Legacy method */ phylink_mac_config(pl, &pl->link_config); phylink_mac_pcs_an_restart(pl); @@ -854,7 +854,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, if (pl->pcs_ops) pl->pcs_ops->pcs_get_state(pl->pcs, state); - else if (pl->mac_ops->mac_pcs_get_state) + else if (pl->mac_ops->mac_pcs_get_state && + pl->config->legacy_pre_march2020) pl->mac_ops->mac_pcs_get_state(pl->config, state); else state->link = 0; @@ -1048,12 +1049,11 @@ static void phylink_resolve(struct work_struct *w) } phylink_major_config(pl, false, &link_state); pl->link_config.interface = link_state.interface; - } else if (!pl->pcs_ops) { + } else if (!pl->pcs_ops && pl->config->legacy_pre_march2020) { /* The interface remains unchanged, only the speed, * duplex or pause settings have changed. Call the * old mac_config() method to configure the MAC/PCS - * only if we do not have a PCS installed (an - * unconverted user.) + * only if we do not have a legacy MAC driver. */ phylink_mac_config(pl, &link_state); } @@ -1090,6 +1090,12 @@ static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) } } +static void phylink_enable_and_run_resolve(struct phylink *pl, int bit) +{ + clear_bit(bit, &pl->phylink_disable_state); + phylink_run_resolve(pl); +} + static void phylink_fixed_poll(struct timer_list *t) { struct phylink *pl = container_of(t, struct phylink, link_poll); @@ -1574,8 +1580,7 @@ void phylink_start(struct phylink *pl) */ phylink_mac_initial_config(pl, true); - clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); - phylink_run_resolve(pl); + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_STOPPED); if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) { int irq = gpiod_to_irq(pl->link_gpio); @@ -1648,6 +1653,7 @@ EXPORT_SYMBOL_GPL(phylink_stop); * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan * * Handle a network device suspend event. There are several cases: + * * - If Wake-on-Lan is not active, we can bring down the link between * the MAC and PHY by calling phylink_stop(). 
* - If Wake-on-Lan is active, and being handled only by the PHY, we @@ -1715,8 +1721,7 @@ void phylink_resume(struct phylink *pl) phylink_mac_initial_config(pl, true); /* Re-enable and re-resolve the link parameters */ - clear_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state); - phylink_run_resolve(pl); + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_MAC_WOL); } else { phylink_start(pl); } @@ -2645,8 +2650,7 @@ static void phylink_sfp_link_up(void *upstream) ASSERT_RTNL(); - clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); - phylink_run_resolve(pl); + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK); } /* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 1180a0e2445f..9e52c5d2d77f 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -173,6 +173,7 @@ struct channel { spinlock_t downl; /* protects `chan', file.xq dequeue */ struct ppp *ppp; /* ppp unit we're connected to */ struct net *chan_net; /* the net channel belongs to */ + netns_tracker ns_tracker; struct list_head clist; /* link in list of channels per unit */ rwlock_t upl; /* protects `ppp' and 'bridge' */ struct channel __rcu *bridge; /* "bridged" ppp channel */ @@ -2879,7 +2880,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) pch->ppp = NULL; pch->chan = chan; - pch->chan_net = get_net(net); + pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL); chan->ppp = pch; init_ppp_file(&pch->file, CHANNEL); pch->file.hdrlen = chan->hdrlen; @@ -3519,7 +3520,7 @@ ppp_disconnect_channel(struct channel *pch) */ static void ppp_destroy_channel(struct channel *pch) { - put_net(pch->chan_net); + put_net_track(pch->chan_net, &pch->ns_tracker); pch->chan_net = NULL; atomic_dec(&channel_count); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 24753a4da7e6..e303b522efb5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32); max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); + if (max == 0) + max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ /* some devices set dwNtbOutMaxSize too low for the above default */ min = min(min, max); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 2bfb59ae0eaf..185e08c1af31 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2398,7 +2398,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (dev->domain_data.phyirq > 0) phydev->irq = dev->domain_data.phyirq; else - phydev->irq = 0; + phydev->irq = PHY_POLL; netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq); /* set to AUTOMDIX */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c74af526d79b..088d9404c93d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3425,7 +3425,6 @@ static struct virtio_driver virtio_net_driver = { .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 14fae317bc70..fd407c0e2856 100644 --- 
a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #ifdef CONFIG_PCI_MSI if (adapter->intr.type == VMXNET3_IT_MSIX) { - int i, nvec; + int i, nvec, nvec_allocated; nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? 1 : adapter->num_tx_queues; @@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) for (i = 0; i < nvec; i++) adapter->intr.msix_entries[i].entry = i; - nvec = vmxnet3_acquire_msix_vectors(adapter, nvec); - if (nvec < 0) + nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec); + if (nvec_allocated < 0) goto msix_err; /* If we cannot allocate one MSIx vector per queue * then limit the number of rx queues to 1 */ - if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) { + if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT && + nvec != VMXNET3_LINUX_MIN_MSIX_VECT) { if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE || adapter->num_rx_queues != 1) { adapter->share_intr = VMXNET3_INTR_TXSHARE; @@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) } } - adapter->intr.num_intrs = nvec; + adapter->intr.num_intrs = nvec_allocated; return; msix_err: /* If we cannot allocate MSIx vectors use only one rx queue */ dev_info(&adapter->pdev->dev, "Failed to enable MSI-X, error %d. " - "Limiting #rx queues to 1, try MSI.\n", nvec); + "Limiting #rx queues to 1, try MSI.\n", nvec_allocated); adapter->intr.type = VMXNET3_IT_MSI; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ccf677015d5b..b4c64226b7ca 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, /* strip the ethernet header added for pass through VRF device */ __skb_pull(skb, skb_network_offset(skb)); + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); ret = vrf_ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(ret))) dev->stats.tx_errors++; @@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, RT_SCOPE_LINK); } + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); if (unlikely(net_xmit_eval(ret))) vrf_dev->stats.tx_errors++; @@ -768,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; - vrf_nf_set_untracked(skb); - err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); @@ -790,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; + vrf_nf_set_untracked(skb); + if (qdisc_tx_is_default(vrf_dev) || IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); @@ -812,9 +814,9 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) */ if (rt6) { dst = &rt6->dst; - dev_put(dst->dev); + dev_replace_track(dst->dev, net->loopback_dev, + &dst->dev_tracker, GFP_KERNEL); dst->dev = net->loopback_dev; - dev_hold(dst->dev); dst_release(dst); } } @@ -998,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; - vrf_nf_set_untracked(skb); - err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip_out_direct_finish); @@ -1021,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; + vrf_nf_set_untracked(skb); + if (qdisc_tx_is_default(vrf_dev) || 
IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); @@ -1059,9 +1061,9 @@ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) */ if (rth) { dst = &rth->dst; - dev_put(dst->dev); + dev_replace_track(dst->dev, net->loopback_dev, + &dst->dev_tracker, GFP_KERNEL); dst->dev = net->loopback_dev; - dev_hold(dst->dev); dst_release(dst); } } diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index b7197e80f226..9a4c8ff32d9d 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, return exact; } -static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node) +static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node) { node->parent_bit_packed = (unsigned long)parent | bit; rcu_assign_pointer(*parent, node); diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 551ddaaaf540..a46067c38bf5 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev) { struct wg_device *wg = netdev_priv(dev); struct wg_peer *peer; + struct sk_buff *skb; mutex_lock(&wg->device_update_lock); list_for_each_entry(peer, &wg->peer_list, peer_list) { @@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev) wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); } mutex_unlock(&wg->device_update_lock); - skb_queue_purge(&wg->incoming_handshakes); + while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL) + kfree_skb(skb); + atomic_set(&wg->handshake_queue_len, 0); wg_socket_reinit(wg, NULL, NULL); return 0; } @@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev) destroy_workqueue(wg->handshake_receive_wq); destroy_workqueue(wg->handshake_send_wq); destroy_workqueue(wg->packet_crypt_wq); - wg_packet_queue_free(&wg->decrypt_queue); - wg_packet_queue_free(&wg->encrypt_queue); + wg_packet_queue_free(&wg->handshake_queue, true); + wg_packet_queue_free(&wg->decrypt_queue, false); + wg_packet_queue_free(&wg->encrypt_queue, false); rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ wg_ratelimiter_uninit(); memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); - skb_queue_purge(&wg->incoming_handshakes); free_percpu(dev->tstats); - free_percpu(wg->incoming_handshakes_worker); kvfree(wg->index_hashtable); kvfree(wg->peer_hashtable); mutex_unlock(&wg->device_update_lock); @@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, init_rwsem(&wg->static_identity.lock); mutex_init(&wg->socket_update_lock); mutex_init(&wg->device_update_lock); - skb_queue_head_init(&wg->incoming_handshakes); wg_allowedips_init(&wg->peer_allowedips); wg_cookie_checker_init(&wg->cookie_checker, wg); INIT_LIST_HEAD(&wg->peer_list); @@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (!dev->tstats) goto err_free_index_hashtable; - wg->incoming_handshakes_worker = - wg_packet_percpu_multicore_worker_alloc( - wg_packet_handshake_receive_worker, wg); - if (!wg->incoming_handshakes_worker) - goto err_free_tstats; - wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); if (!wg->handshake_receive_wq) - goto err_free_incoming_handshakes; + goto err_free_tstats; wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); @@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (ret < 0) goto err_free_encrypt_queue; - ret = wg_ratelimiter_init(); + ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker, + MAX_QUEUED_INCOMING_HANDSHAKES); if (ret < 0) goto err_free_decrypt_queue; + ret = wg_ratelimiter_init(); + if (ret < 0) + goto err_free_handshake_queue; + ret = register_netdevice(dev); if (ret < 0) goto err_uninit_ratelimiter; @@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, err_uninit_ratelimiter: wg_ratelimiter_uninit(); +err_free_handshake_queue: + wg_packet_queue_free(&wg->handshake_queue, false); err_free_decrypt_queue: - wg_packet_queue_free(&wg->decrypt_queue); + wg_packet_queue_free(&wg->decrypt_queue, false); err_free_encrypt_queue: - wg_packet_queue_free(&wg->encrypt_queue); + wg_packet_queue_free(&wg->encrypt_queue, false); err_destroy_packet_crypt: destroy_workqueue(wg->packet_crypt_wq); err_destroy_handshake_send: destroy_workqueue(wg->handshake_send_wq); err_destroy_handshake_receive: destroy_workqueue(wg->handshake_receive_wq); -err_free_incoming_handshakes: - free_percpu(wg->incoming_handshakes_worker); err_free_tstats: free_percpu(dev->tstats); err_free_index_hashtable: @@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = { static void wg_netns_pre_exit(struct net *net) { struct wg_device *wg; + struct wg_peer *peer; rtnl_lock(); list_for_each_entry(wg, &device_list, device_list) { @@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net) mutex_lock(&wg->device_update_lock); rcu_assign_pointer(wg->creating_net, NULL); wg_socket_reinit(wg, NULL, NULL); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); mutex_unlock(&wg->device_update_lock); } } diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h index 854bc3d97150..43c7cebbf50b 100644 --- a/drivers/net/wireguard/device.h +++ b/drivers/net/wireguard/device.h @@ -39,21 +39,18 @@ struct prev_queue { struct wg_device { struct net_device *dev; - struct crypt_queue encrypt_queue, decrypt_queue; + struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue; struct 
sock __rcu *sock4, *sock6; struct net __rcu *creating_net; struct noise_static_identity static_identity; - struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; - struct workqueue_struct *packet_crypt_wq; - struct sk_buff_head incoming_handshakes; - int incoming_handshake_cpu; - struct multicore_worker __percpu *incoming_handshakes_worker; + struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq; struct cookie_checker cookie_checker; struct pubkey_hashtable *peer_hashtable; struct index_hashtable *index_hashtable; struct allowedips peer_allowedips; struct mutex device_update_lock, socket_update_lock; struct list_head device_list, peer_list; + atomic_t handshake_queue_len; unsigned int num_peers, device_update_gen; u32 fwmark; u16 incoming_port; diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c index 75dbe77b0b4b..ee4da9ab8013 100644 --- a/drivers/net/wireguard/main.c +++ b/drivers/net/wireguard/main.c @@ -17,7 +17,7 @@ #include <linux/genetlink.h> #include <net/rtnetlink.h> -static int __init mod_init(void) +static int __init wg_mod_init(void) { int ret; @@ -60,7 +60,7 @@ err_allowedips: return ret; } -static void __exit mod_exit(void) +static void __exit wg_mod_exit(void) { wg_genetlink_uninit(); wg_device_uninit(); @@ -68,8 +68,8 @@ static void __exit mod_exit(void) wg_allowedips_slab_uninit(); } -module_init(mod_init); -module_exit(mod_exit); +module_init(wg_mod_init); +module_exit(wg_mod_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("WireGuard secure network tunnel"); MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 48e7b982a307..1de413b19e34 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, return 0; } -void wg_packet_queue_free(struct crypt_queue *queue) +void wg_packet_queue_free(struct crypt_queue *queue, bool purge) { free_percpu(queue->worker); - WARN_ON(!__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, NULL); + WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); + ptr_ring_cleanup(&queue->ring, purge ? 
(void(*)(void*))kfree_skb : NULL); } #define NEXT(skb) ((skb)->prev) diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index 52da5e963003..583adb37ee1e 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -23,7 +23,7 @@ struct sk_buff; /* queueing.c APIs: */ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, unsigned int len); -void wg_packet_queue_free(struct crypt_queue *queue); +void wg_packet_queue_free(struct crypt_queue *queue, bool purge); struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c index 3fedd1d21f5e..dd55e5c26f46 100644 --- a/drivers/net/wireguard/ratelimiter.c +++ b/drivers/net/wireguard/ratelimiter.c @@ -176,12 +176,12 @@ int wg_ratelimiter_init(void) (1U << 14) / sizeof(struct hlist_head))); max_entries = table_size * 8; - table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL); + table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL); if (unlikely(!table_v4)) goto err_kmemcache; #if IS_ENABLED(CONFIG_IPV6) - table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL); + table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL); if (unlikely(!table_v6)) { kvfree(table_v4); goto err_kmemcache; diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 7dc84bcca261..7b8df406c773 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg, return; } - under_load = skb_queue_len(&wg->incoming_handshakes) >= - MAX_QUEUED_INCOMING_HANDSHAKES / 8; + under_load = atomic_read(&wg->handshake_queue_len) >= + MAX_QUEUED_INCOMING_HANDSHAKES / 8; if (under_load) { last_under_load = ktime_get_coarse_boottime_ns(); } else if (last_under_load) { @@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg, void wg_packet_handshake_receive_worker(struct work_struct *work) { - struct wg_device *wg = container_of(work, struct multicore_worker, - work)->ptr; + struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; + struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue); struct sk_buff *skb; - while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) { + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { wg_receive_handshake_packet(wg, skb); dev_kfree_skb(skb); + atomic_dec(&wg->handshake_queue_len); cond_resched(); } } @@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { - int cpu; - - if (skb_queue_len(&wg->incoming_handshakes) > - MAX_QUEUED_INCOMING_HANDSHAKES || - unlikely(!rng_is_initialized())) { + int cpu, ret = -EBUSY; + + if (unlikely(!rng_is_initialized())) + goto drop; + if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) { + if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) { + ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); + spin_unlock_bh(&wg->handshake_queue.ring.producer_lock); + } + } else + ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); + if (ret) { + drop: net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", wg->dev->name, skb); goto err; } - 
skb_queue_tail(&wg->incoming_handshakes, skb); - /* Queues up a call to packet_process_queued_handshake_ - * packets(skb): - */ - cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu); + atomic_inc(&wg->handshake_queue_len); + cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu); + /* Queues up a call to packet_process_queued_handshake_packets(skb): */ queue_work_on(cpu, wg->handshake_receive_wq, - &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work); + &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work); break; } case cpu_to_le32(MESSAGE_DATA): diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 8c496b747108..6f07b949cb81 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) { write_lock_bh(&peer->endpoint_lock); memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); - dst_cache_reset(&peer->endpoint_cache); + dst_cache_reset_now(&peer->endpoint_cache); write_unlock_bh(&peer->endpoint_lock); } diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 0e9bad33fac8..141c1b5a7b1f 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -153,6 +153,10 @@ static void ar5523_cmd_rx_cb(struct urb *urb) ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START"); return; } + if (!cmd->odata) { + ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply"); + return; + } memcpy(cmd->odata, hdr + 1, sizeof(u32)); cmd->olen = sizeof(u32); cmd->res = 0; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 5935e0973d14..72a366aa9f60 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -12,6 +12,7 @@ #include <linux/dmi.h> #include <linux/ctype.h> #include <linux/pm_qos.h> +#include <linux/nvmem-consumer.h> #include <asm/byteorder.h> #include "core.h" @@ -935,7 +936,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) } if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || - ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || + ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID; else bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID; @@ -1726,7 +1728,8 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) /* As of now pre-cal is valid for 10_4 variants */ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || - ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || + ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL; ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); @@ -1853,6 +1856,39 @@ out_free: return ret; } +static int ath10k_download_cal_nvmem(struct ath10k *ar, const char *cell_name) +{ + struct nvmem_cell *cell; + void *buf; + size_t len; + int ret; + + cell = devm_nvmem_cell_get(ar->dev, cell_name); + if (IS_ERR(cell)) { + ret = PTR_ERR(cell); + return ret; + } + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + if (ar->hw_params.cal_data_len != len) { + kfree(buf); + ath10k_warn(ar, "invalid calibration data length in nvmem-cell '%s': %zu != %u\n", + cell_name, len, ar->hw_params.cal_data_len); + return -EMSGSIZE; + } + + ret = ath10k_download_board_data(ar, buf, len); + kfree(buf); + if (ret) + ath10k_warn(ar, "failed to download calibration data from 
nvmem-cell '%s': %d\n", + cell_name, ret); + + return ret; +} + int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, struct ath10k_fw_file *fw_file) { @@ -2087,6 +2123,18 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar) { int ret; + ret = ath10k_download_cal_nvmem(ar, "pre-calibration"); + if (ret == 0) { + ar->cal_mode = ATH10K_PRE_CAL_MODE_NVMEM; + goto success; + } else if (ret == -EPROBE_DEFER) { + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a pre-calibration nvmem-cell, try file next: %d\n", + ret); + ret = ath10k_download_cal_file(ar, ar->pre_cal_file); if (ret == 0) { ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE; @@ -2153,6 +2201,18 @@ static int ath10k_download_cal_data(struct ath10k *ar) "pre cal download procedure failed, try cal file: %d\n", ret); + ret = ath10k_download_cal_nvmem(ar, "calibration"); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_NVMEM; + goto done; + } else if (ret == -EPROBE_DEFER) { + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a calibration nvmem-cell, try file next: %d\n", + ret); + ret = ath10k_download_cal_file(ar, ar->cal_file); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_FILE; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 5aeff2d9f6cf..9f6680b3be0a 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -877,8 +877,10 @@ enum ath10k_cal_mode { ATH10K_CAL_MODE_FILE, ATH10K_CAL_MODE_OTP, ATH10K_CAL_MODE_DT, + ATH10K_CAL_MODE_NVMEM, ATH10K_PRE_CAL_MODE_FILE, ATH10K_PRE_CAL_MODE_DT, + ATH10K_PRE_CAL_MODE_NVMEM, ATH10K_CAL_MODE_EEPROM, }; @@ -898,10 +900,14 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) return "otp"; case ATH10K_CAL_MODE_DT: return "dt"; + case ATH10K_CAL_MODE_NVMEM: + return "nvmem"; case ATH10K_PRE_CAL_MODE_FILE: return "pre-cal-file"; case ATH10K_PRE_CAL_MODE_DT: return "pre-cal-dt"; + case ATH10K_PRE_CAL_MODE_NVMEM: + return "pre-cal-nvmem"; case ATH10K_CAL_MODE_EEPROM: return "eeprom"; } diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index de8b632b058c..aaa7b05ff49d 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -14,6 +14,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE1: target->host HTT + HTC control */ @@ -40,6 +41,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ @@ -73,11 +75,12 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ { - .flags = CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, @@ -89,6 +92,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE10: target->host HTT */ @@ -142,6 +146,7 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ @@ -175,6 +180,7 @@ 
const struct ce_attr ath11k_host_ce_config_qca6390[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ @@ -220,6 +226,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ @@ -489,18 +496,32 @@ err_unlock: return skb; } -static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe) +static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct sk_buff *skb; + struct sk_buff_head list; + __skb_queue_head_init(&list); while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) { if (!skb) continue; dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); - dev_kfree_skb_any(skb); + + if ((!pipe->send_cb) || ab->hw_params.credit_flow) { + dev_kfree_skb_any(skb); + continue; + } + + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath11k_dbg(ab, ATH11K_DBG_AHB, "tx ce pipe %d len %d\n", + pipe->pipe_num, skb->len); + pipe->send_cb(ab, skb); } } @@ -636,7 +657,7 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id) pipe->attr_flags = attr->flags; if (attr->src_nentries) { - pipe->send_cb = ath11k_ce_send_done_cb; + pipe->send_cb = attr->send_cb; nentries = roundup_pow_of_two(attr->src_nentries); desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); @@ -667,9 +688,10 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id) void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; + const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id]; - if (pipe->send_cb) - pipe->send_cb(pipe); + if (attr->src_nentries) + ath11k_ce_tx_process_cb(pipe); if (pipe->recv_cb) ath11k_ce_recv_process_cb(pipe); @@ -678,9 +700,10 @@ void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id) void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; + const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id]; - if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb) - pipe->send_cb(pipe); + if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries) + ath11k_ce_tx_process_cb(pipe); } EXPORT_SYMBOL(ath11k_ce_per_engine_service); @@ -953,6 +976,7 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab) void ath11k_ce_free_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; + struct ath11k_ce_ring *ce_ring; int desc_sz; int i; @@ -964,22 +988,24 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab) if (pipe->src_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); + ce_ring = pipe->src_ring; dma_free_coherent(ab->dev, pipe->src_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, - pipe->src_ring->base_addr_owner_space, - pipe->src_ring->base_addr_ce_space); + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_ce_space_unaligned); kfree(pipe->src_ring); pipe->src_ring = NULL; } if (pipe->dest_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST); + ce_ring = pipe->dest_ring; dma_free_coherent(ab->dev, pipe->dest_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, - pipe->dest_ring->base_addr_owner_space, - pipe->dest_ring->base_addr_ce_space); + ce_ring->base_addr_owner_space_unaligned, + 
ce_ring->base_addr_ce_space_unaligned); kfree(pipe->dest_ring); pipe->dest_ring = NULL; } @@ -987,11 +1013,12 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab) if (pipe->status_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS); + ce_ring = pipe->status_ring; dma_free_coherent(ab->dev, pipe->status_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, - pipe->status_ring->base_addr_owner_space, - pipe->status_ring->base_addr_ce_space); + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_ce_space_unaligned); kfree(pipe->status_ring); pipe->status_ring = NULL; } diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 713f766cac22..8255b6cfab0c 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -101,6 +101,7 @@ struct ce_attr { unsigned int dest_nentries; void (*recv_cb)(struct ath11k_base *, struct sk_buff *); + void (*send_cb)(struct ath11k_base *, struct sk_buff *); }; #define CE_DESC_RING_ALIGN 8 @@ -154,7 +155,7 @@ struct ath11k_ce_pipe { unsigned int buf_sz; unsigned int rx_buf_needed; - void (*send_cb)(struct ath11k_ce_pipe *); + void (*send_cb)(struct ath11k_base *, struct sk_buff *); void (*recv_cb)(struct ath11k_base *, struct sk_buff *); struct tasklet_struct intr_tq; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index b5a2af3ffc3e..dd1a1bb078c3 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -76,12 +76,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = true, .supports_shadow_regs = false, .idle_ps = false, + .supports_sta_ps = false, .cold_boot_calib = true, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .fix_l1ss = true, + .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = true, + .wakeup_mhi = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -125,12 +130,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = true, .supports_shadow_regs = false, .idle_ps = false, + .supports_sta_ps = false, .cold_boot_calib = true, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .fix_l1ss = true, + .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = true, + .wakeup_mhi = false, }, { .name = "qca6390 hw2.0", @@ -173,12 +183,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, + .supports_sta_ps = true, .cold_boot_calib = false, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .fix_l1ss = true, + .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = false, + .wakeup_mhi = true, }, { .name = "qcn9074 hw1.0", @@ -221,12 +236,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = true, .supports_shadow_regs = false, .idle_ps = false, + .supports_sta_ps = false, .cold_boot_calib = false, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .fix_l1ss = true, + .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = 
&ath11k_hw_hal_params_ipq8074, + .supports_dynamic_smps_6ghz = true, + .alloc_cacheable_memory = true, + .wakeup_mhi = false, }, { .name = "wcn6855 hw2.0", @@ -269,12 +289,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, + .supports_sta_ps = true, .cold_boot_calib = false, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .fix_l1ss = false, + .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = false, + .wakeup_mhi = true, }, }; @@ -392,11 +417,26 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, scnprintf(variant, sizeof(variant), ",variant=%s", ab->qmi.target.bdf_ext); - scnprintf(name, name_len, - "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", - ath11k_bus_str(ab->hif.bus), - ab->qmi.target.chip_id, - ab->qmi.target.board_id, variant); + switch (ab->id.bdf_search) { + case ATH11K_BDF_SEARCH_BUS_AND_BOARD: + scnprintf(name, name_len, + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s", + ath11k_bus_str(ab->hif.bus), + ab->id.vendor, ab->id.device, + ab->id.subsystem_vendor, + ab->id.subsystem_device, + ab->qmi.target.chip_id, + ab->qmi.target.board_id, + variant); + break; + default: + scnprintf(name, name_len, + "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", + ath11k_bus_str(ab->hif.bus), + ab->qmi.target.chip_id, + ab->qmi.target.board_id, variant); + break; + } ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -633,7 +673,7 @@ static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, return 0; } -#define BOARD_NAME_SIZE 100 +#define BOARD_NAME_SIZE 200 int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { char boardname[BOARD_NAME_SIZE]; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 31d234a51c79..bbfc10fd5c6d 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -47,6 +47,11 @@ enum ath11k_supported_bw { ATH11K_BW_160 = 3, }; +enum ath11k_bdf_search { + ATH11K_BDF_SEARCH_DEFAULT, + ATH11K_BDF_SEARCH_BUS_AND_BOARD, +}; + enum wme_ac { WME_AC_BE, WME_AC_BK, @@ -240,6 +245,7 @@ struct ath11k_vif { bool is_started; bool is_up; bool spectral_enabled; + bool ps; u32 aid; u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; @@ -249,6 +255,8 @@ struct ath11k_vif { int txpower; bool rsnie_present; bool wpaie_present; + bool bcca_zero_sent; + bool do_not_send_tmpl; struct ieee80211_chanctx_conf chanctx; }; @@ -759,6 +767,14 @@ struct ath11k_base { struct completion htc_suspend; + struct { + enum ath11k_bdf_search bdf_search; + u32 vendor; + u32 device; + u32 subsystem_vendor; + u32 subsystem_device; + } id; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c index fd98ba5b1130..de220a10bce3 100644 --- a/drivers/net/wireless/ath/ath11k/dbring.c +++ b/drivers/net/wireless/ath/ath11k/dbring.c @@ -87,17 +87,23 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar, req_entries = min(num_free, ring->bufs_max); num_remain = req_entries; align = ring->buf_align; - size = sizeof(*buff) + ring->buf_sz + align - 1; + size = ring->buf_sz + align - 1; while (num_remain > 0) { - buff = kzalloc(size, 
GFP_ATOMIC); + buff = kzalloc(sizeof(*buff), GFP_ATOMIC); if (!buff) break; + buff->payload = kzalloc(size, GFP_ATOMIC); + if (!buff->payload) { + kfree(buff); + break; + } ret = ath11k_dbring_bufs_replenish(ar, ring, buff); if (ret) { ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n", num_remain, req_entries); + kfree(buff->payload); kfree(buff); break; } @@ -282,7 +288,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; num_entry = ev->fixed.num_buf_release_entry; - size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1; + size = ring->buf_sz + ring->buf_align - 1; num_buff_reaped = 0; spin_lock_bh(&srng->lock); @@ -319,7 +325,8 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, ring->handler(ar, &handler_data); } - memset(buff, 0, size); + buff->paddr = 0; + memset(buff->payload, 0, size); ath11k_dbring_bufs_replenish(ar, ring, buff); } @@ -346,6 +353,7 @@ void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring) idr_remove(&ring->bufs_idr, buf_id); dma_unmap_single(ar->ab->dev, buff->paddr, ring->buf_sz, DMA_FROM_DEVICE); + kfree(buff->payload); kfree(buff); } diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h index f7fce9ef9c36..78a985faa0a1 100644 --- a/drivers/net/wireless/ath/ath11k/dbring.h +++ b/drivers/net/wireless/ath/ath11k/dbring.h @@ -13,7 +13,7 @@ struct ath11k_dbring_element { dma_addr_t paddr; - u8 payload[0]; + u8 *payload; }; struct ath11k_dbring_data { diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c index c86de95fbdc5..958d87429062 100644 --- a/drivers/net/wireless/ath/ath11k/debug.c +++ b/drivers/net/wireless/ath/ath11k/debug.c @@ -17,7 +17,7 @@ void ath11k_info(struct ath11k_base *ab, const char *fmt, ...) va_start(args, fmt); vaf.va = &args; dev_info(ab->dev, "%pV", &vaf); - /* TODO: Trace the log */ + trace_ath11k_log_info(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_info); @@ -32,7 +32,7 @@ void ath11k_err(struct ath11k_base *ab, const char *fmt, ...) va_start(args, fmt); vaf.va = &args; dev_err(ab->dev, "%pV", &vaf); - /* TODO: Trace the log */ + trace_ath11k_log_err(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_err); @@ -47,7 +47,7 @@ void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...) va_start(args, fmt); vaf.va = &args; dev_warn_ratelimited(ab->dev, "%pV", &vaf); - /* TODO: Trace the log */ + trace_ath11k_log_warn(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_warn); @@ -68,7 +68,7 @@ void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask, if (ath11k_debug_mask & mask) dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf); - /* TODO: trace log */ + trace_ath11k_log_dbg(ab, mask, &vaf); va_end(args); } @@ -100,6 +100,10 @@ void ath11k_dbg_dump(struct ath11k_base *ab, dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf); } } + + /* tracing code doesn't like null strings */ + trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "", + buf, len); } EXPORT_SYMBOL(ath11k_dbg_dump); diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h index 659a275e2eb3..fbbd5fe02aa8 100644 --- a/drivers/net/wireless/ath/ath11k/debug.h +++ b/drivers/net/wireless/ath/ath11k/debug.h @@ -60,7 +60,8 @@ static inline void ath11k_dbg_dump(struct ath11k_base *ab, #define ath11k_dbg(ar, dbg_mask, fmt, ...) 
\ do { \ - if (ath11k_debug_mask & dbg_mask) \ + if ((ath11k_debug_mask & dbg_mask) || \ + trace_ath11k_log_dbg_enabled()) \ __ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \ } while (0) diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 80afd35337a1..dba055d085be 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -195,7 +195,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar, * received 'update stats' event, we keep a 3 seconds timeout in case, * fw_stats_done is not marked yet */ - timeout = jiffies + msecs_to_jiffies(3 * HZ); + timeout = jiffies + msecs_to_jiffies(3 * 1000); ath11k_debugfs_fw_stats_reset(ar); diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index 8baaeeb8cf82..81b0b2baa461 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -101,8 +101,11 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring) if (!ring->vaddr_unaligned) return; - dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, - ring->paddr_unaligned); + if (ring->cached) + kfree(ring->vaddr_unaligned); + else + dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, + ring->paddr_unaligned); ring->vaddr_unaligned = NULL; } @@ -222,6 +225,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, int entry_sz = ath11k_hal_srng_get_entrysize(ab, type); int max_entries = ath11k_hal_srng_get_max_entries(ab, type); int ret; + bool cached = false; if (max_entries < 0 || entry_sz < 0) return -EINVAL; @@ -230,9 +234,29 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, num_entries = max_entries; ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1; - ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, - &ring->paddr_unaligned, - GFP_KERNEL); + + if (ab->hw_params.alloc_cacheable_memory) { + /* Allocate the reo dst and tx completion rings from cacheable memory */ + switch (type) { + case HAL_REO_DST: + case HAL_WBM2SW_RELEASE: + cached = true; + break; + default: + cached = false; + } + + if (cached) { + ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL); + ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned); + } + } + + if (!cached) + ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, + &ring->paddr_unaligned, + GFP_KERNEL); + if (!ring->vaddr_unaligned) return -ENOMEM; @@ -292,6 +316,11 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, return -EINVAL; } + if (cached) { + params.flags |= HAL_SRNG_FLAGS_CACHED; + ring->cached = 1; + } + ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, ¶ms); if (ret < 0) { ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n", @@ -742,13 +771,12 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, const struct ath11k_hw_hal_params *hal_params; int grp_id = irq_grp->grp_id; int work_done = 0; - int i = 0, j; + int i, j; int tot_work_done = 0; - while (ab->hw_params.ring_mask->tx[grp_id] >> i) { - if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i)) - ath11k_dp_tx_completion_handler(ab, i); - i++; + if (ab->hw_params.ring_mask->tx[grp_id]) { + i = __fls(ab->hw_params.ring_mask->tx[grp_id]); + ath11k_dp_tx_completion_handler(ab, i); } if (ab->hw_params.ring_mask->rx_err[grp_id]) { diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index 4794ca04f213..a4c36a9be338 100644 --- 
a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -64,6 +64,7 @@ struct dp_srng { dma_addr_t paddr; int size; u32 ring_id; + u8 cached; }; struct dp_rxdma_ring { @@ -517,7 +518,8 @@ struct htt_ppdu_stats_cfg_cmd { } __packed; #define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0) -#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8) +#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8) +#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9) #define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16) enum htt_ppdu_stats_tag_type { diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index c5320847b80a..fcd7a6d27d12 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -20,13 +20,15 @@ #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) -static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) +static inline +u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc); } -static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline +enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, + struct hal_rx_desc *desc) { if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc)) return HAL_ENCRYPT_TYPE_OPEN; @@ -34,32 +36,34 @@ static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_bas return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline +u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc); } -static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline +bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); } -static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc); } -static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, - struct sk_buff *skb) +static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; @@ -67,8 +71,8 @@ static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, return ieee80211_has_morefrags(hdr->frame_control); } -static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, - struct sk_buff *skb) +static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; @@ -76,37 +80,37 @@ static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; } -static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline 
u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc); } -static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_attention(desc); } -static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, __le32_to_cpu(attn->info2)); } -static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) { return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, __le32_to_cpu(attn->info2)) == @@ -154,68 +158,68 @@ static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab, return errmap & DP_RX_MPDU_ERR_MSDU_LEN; } -static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc); } -static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc); } -static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc); } -static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc)); } -static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return 
ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc); } -static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc); } -static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc); } -static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc); } @@ -233,14 +237,14 @@ static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); } -static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) +static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) { return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, __le32_to_cpu(attn->info1)); } -static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, - struct hal_rx_desc *rx_desc) +static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u8 *rx_pkt_hdr; @@ -249,8 +253,8 @@ static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, return rx_pkt_hdr; } -static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, - struct hal_rx_desc *rx_desc) +static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u32 tlv_tag; @@ -259,15 +263,15 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, return tlv_tag == HAL_RX_MPDU_START; } -static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, - struct hal_rx_desc *rx_desc) +static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); } -static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, - struct hal_rx_desc *desc, - u16 len) +static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc *desc, + u16 len) { ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); } @@ -2596,36 +2600,30 @@ free_out: static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, struct napi_struct *napi, struct sk_buff_head *msdu_list, - int *quota, int ring_id) + int mac_id) { - struct ath11k_skb_rxcb *rxcb; struct sk_buff *msdu; struct ath11k *ar; struct ieee80211_rx_status rx_status = {0}; - u8 mac_id; int ret; if (skb_queue_empty(msdu_list)) return; - rcu_read_lock(); - - while (*quota && (msdu = __skb_dequeue(msdu_list))) { - rxcb = ATH11K_SKB_RXCB(msdu); - mac_id = rxcb->mac_id; - ar = ab->pdevs[mac_id].ar; - if (!rcu_dereference(ab->pdevs_active[mac_id])) { - dev_kfree_skb_any(msdu); - continue; - } + if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) { + __skb_queue_purge(msdu_list); + return; + } - if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { - dev_kfree_skb_any(msdu); - continue; - } + ar = ab->pdevs[mac_id].ar; + if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) { + __skb_queue_purge(msdu_list); + return; + } + while ((msdu = __skb_dequeue(msdu_list))) { ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); - if 
(ret) { + if (unlikely(ret)) { ath11k_dbg(ab, ATH11K_DBG_DATA, "Unable to process msdu %d", ret); dev_kfree_skb_any(msdu); @@ -2633,10 +2631,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, } ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); - (*quota)--; } - - rcu_read_unlock(); } int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, @@ -2645,19 +2640,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, struct ath11k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring; int num_buffs_reaped[MAX_RADIOS] = {0}; - struct sk_buff_head msdu_list; + struct sk_buff_head msdu_list[MAX_RADIOS]; struct ath11k_skb_rxcb *rxcb; int total_msdu_reaped = 0; struct hal_srng *srng; struct sk_buff *msdu; - int quota = budget; bool done = false; int buf_id, mac_id; struct ath11k *ar; - u32 *rx_desc; + struct hal_reo_dest_ring *desc; + enum hal_reo_dest_ring_push_reason push_reason; + u32 cookie; int i; - __skb_queue_head_init(&msdu_list); + for (i = 0; i < MAX_RADIOS; i++) + __skb_queue_head_init(&msdu_list[i]); srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; @@ -2666,13 +2663,11 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, ath11k_hal_srng_access_begin(ab, srng); try_again: - while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { - struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; - enum hal_reo_dest_ring_push_reason push_reason; - u32 cookie; - + while (likely(desc = + (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, + srng))) { cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, - desc.buf_addr_info.info1); + desc->buf_addr_info.info1); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); @@ -2681,7 +2676,7 @@ try_again: rx_ring = &ar->dp.rx_refill_buf_ring; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); - if (!msdu) { + if (unlikely(!msdu)) { ath11k_warn(ab, "frame rx with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); @@ -2697,37 +2692,41 @@ try_again: DMA_FROM_DEVICE); num_buffs_reaped[mac_id]++; - total_msdu_reaped++; push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, - desc.info0); - if (push_reason != - HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { + desc->info0); + if (unlikely(push_reason != + HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { dev_kfree_skb_any(msdu); ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; continue; } - rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & + rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & + rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & + rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, - desc.rx_mpdu_info.meta_data); + desc->rx_mpdu_info.meta_data); rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, - desc.rx_mpdu_info.info0); + desc->rx_mpdu_info.info0); rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, - desc.info0); + desc->info0); rxcb->mac_id = mac_id; - __skb_queue_tail(&msdu_list, msdu); + __skb_queue_tail(&msdu_list[mac_id], msdu); - if (total_msdu_reaped >= quota && !rxcb->is_continuation) { + if (rxcb->is_continuation) { + done = false; + } else { + 
total_msdu_reaped++; done = true; - break; } + + if (total_msdu_reaped >= budget) + break; } /* Hw might have updated the head pointer after we cached it. @@ -2736,7 +2735,7 @@ try_again: * head pointer so that we can reap complete MPDU in the current * rx processing. */ - if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { + if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) { ath11k_hal_srng_access_end(ab, srng); goto try_again; } @@ -2745,25 +2744,23 @@ try_again: spin_unlock_bh(&srng->lock); - if (!total_msdu_reaped) + if (unlikely(!total_msdu_reaped)) goto exit; for (i = 0; i < ab->num_radios; i++) { if (!num_buffs_reaped[i]) continue; + ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i); + ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } - - ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, - "a, ring_id); - exit: - return budget - quota; + return total_msdu_reaped; } static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, @@ -4829,7 +4826,7 @@ static struct sk_buff * ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u32 mac_id, struct sk_buff *head_msdu, struct sk_buff *last_msdu, - struct ieee80211_rx_status *rxs) + struct ieee80211_rx_status *rxs, bool *fcs_err) { struct ath11k_base *ab = ar->ab; struct sk_buff *msdu, *prev_buf; @@ -4839,12 +4836,17 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u8 *dest, decap_format; struct ieee80211_hdr_3addr *wh; struct rx_attention *rx_attention; + u32 err_bitmap; if (!head_msdu) goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); + err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); + + if (err_bitmap & DP_RX_MPDU_ERR_FCS) + *fcs_err = true; if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) return NULL; @@ -4933,9 +4935,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, struct ath11k_pdev_dp *dp = &ar->dp; struct sk_buff *mon_skb, *skb_next, *header; struct ieee80211_rx_status *rxs = &dp->rx_status; + bool fcs_err = false; mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, - tail_msdu, rxs); + tail_msdu, rxs, &fcs_err); if (!mon_skb) goto mon_deliver_fail; @@ -4943,6 +4946,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, header = mon_skb; rxs->flag = 0; + + if (fcs_err) + rxs->flag = RX_FLAG_FAILED_FCS_CRC; + do { skb_next = mon_skb->next; if (!skb_next) diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 879fb2a9dc0c..88abd64e9047 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -95,11 +95,11 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, u8 ring_selector = 0, ring_map = 0; bool tcl_ring_retry; - if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) + if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))) return -ESHUTDOWN; - if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && - !ieee80211_is_data(hdr->frame_control)) + if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && + !ieee80211_is_data(hdr->frame_control))) return -ENOTSUPP; pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1); @@ -127,7 +127,7 @@ tcl_ring_sel: DP_TX_IDR_SIZE - 1, GFP_ATOMIC); spin_unlock_bh(&tx_ring->tx_idr_lock); - if (ret < 0) { + if (unlikely(ret < 0)) { if (ring_map == 
(BIT(ab->hw_params.max_tx_ring) - 1)) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); return -ENOSPC; @@ -152,7 +152,7 @@ tcl_ring_sel: ti.meta_data_flags = arvif->tcl_metadata; } - if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) { + if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) { if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) { ti.encrypt_type = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); @@ -173,8 +173,8 @@ tcl_ring_sel: ti.bss_ast_idx = arvif->ast_idx; ti.dscp_tid_tbl_idx = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL && - ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) { + if (likely(skb->ip_summed == CHECKSUM_PARTIAL && + ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) { ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) | @@ -211,7 +211,7 @@ tcl_ring_sel: } ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(ab->dev, ti.paddr)) { + if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); ath11k_warn(ab, "failed to DMA map data Tx buffer\n"); ret = -ENOMEM; @@ -231,7 +231,7 @@ tcl_ring_sel: ath11k_hal_srng_access_begin(ab, tcl_ring); hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring); - if (!hal_tcl_desc) { + if (unlikely(!hal_tcl_desc)) { /* NOTE: It is highly unlikely we'll be running out of tcl_ring * desc because the desc is directly enqueued onto hw queue. */ @@ -245,7 +245,7 @@ tcl_ring_sel: * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. */ - if (ring_map != (BIT(ab->hw_params.max_tx_ring) - 1) && + if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) && ab->hw_params.max_tx_ring > 1) { tcl_ring_retry = true; ring_selector++; @@ -293,20 +293,18 @@ static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id, struct sk_buff *msdu; struct ath11k_skb_cb *skb_cb; - spin_lock_bh(&tx_ring->tx_idr_lock); - msdu = idr_find(&tx_ring->txbuf_idr, msdu_id); - if (!msdu) { + spin_lock(&tx_ring->tx_idr_lock); + msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id); + spin_unlock(&tx_ring->tx_idr_lock); + + if (unlikely(!msdu)) { ath11k_warn(ab, "tx completion for unknown msdu_id %d\n", msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); return; } skb_cb = ATH11K_SKB_CB(msdu); - idr_remove(&tx_ring->txbuf_idr, msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); - dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); dev_kfree_skb_any(msdu); @@ -325,12 +323,13 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, struct ath11k_skb_cb *skb_cb; struct ath11k *ar; - spin_lock_bh(&tx_ring->tx_idr_lock); - msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id); - if (!msdu) { + spin_lock(&tx_ring->tx_idr_lock); + msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id); + spin_unlock(&tx_ring->tx_idr_lock); + + if (unlikely(!msdu)) { ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n", ts->msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); return; } @@ -339,9 +338,6 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, ar = skb_cb->ar; - idr_remove(&tx_ring->txbuf_idr, ts->msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); - if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); @@ -435,16 +431,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); - rcu_read_lock(); - - if 
(!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) { + if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) { dev_kfree_skb_any(msdu); - goto exit; + return; } - if (!skb_cb->vif) { + if (unlikely(!skb_cb->vif)) { dev_kfree_skb_any(msdu); - goto exit; + return; } info = IEEE80211_SKB_CB(msdu); @@ -465,7 +459,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, (info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; - if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { + if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar))) { if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) { if (ar->last_ppdu_id == 0) { ar->last_ppdu_id = ts->ppdu_id; @@ -494,9 +488,6 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, */ ieee80211_tx_status(ar->hw, msdu); - -exit: - rcu_read_unlock(); } static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab, @@ -505,11 +496,11 @@ static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab, { ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0); - if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && - ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) + if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && + ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) return; - if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) + if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) return; ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON, @@ -556,8 +547,9 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head); } - if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) && - (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) { + if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) && + (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == + tx_ring->tx_status_tail))) { /* TODO: Process pending tx_status messages when kfifo_is_full() */ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n"); } @@ -580,7 +572,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id); msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id); - if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) { + if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) { ath11k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status, mac_id, msdu_id, @@ -588,16 +580,16 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) continue; } - spin_lock_bh(&tx_ring->tx_idr_lock); - msdu = idr_find(&tx_ring->txbuf_idr, msdu_id); - if (!msdu) { + spin_lock(&tx_ring->tx_idr_lock); + msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id); + if (unlikely(!msdu)) { ath11k_warn(ab, "tx completion for unknown msdu_id %d\n", msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); + spin_unlock(&tx_ring->tx_idr_lock); continue; } - idr_remove(&tx_ring->txbuf_idr, msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); + + spin_unlock(&tx_ring->tx_idr_lock); ar = ab->pdevs[mac_id].ar; @@ -903,7 +895,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); - pdev_mask = 1 << (i + 1); + pdev_mask = 1 << (ar->pdev_idx + i); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask); diff --git 
a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index eaa0edca5576..7cf9e23cb922 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -627,6 +627,21 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng) return NULL; } +static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab, + struct hal_srng *srng) +{ + u32 *desc; + + /* prefetch only if desc is available */ + desc = ath11k_hal_srng_dst_peek(ab, srng); + if (likely(desc)) { + dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc), + (srng->entry_size * sizeof(u32)), + DMA_FROM_DEVICE); + prefetch(desc); + } +} + u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng) { @@ -639,8 +654,15 @@ u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, desc = srng->ring_base_vaddr + srng->u.dst_ring.tp; - srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) % - srng->ring_size; + srng->u.dst_ring.tp += srng->entry_size; + + /* wrap around to start of ring*/ + if (srng->u.dst_ring.tp == srng->ring_size) + srng->u.dst_ring.tp = 0; + + /* Try to prefetch the next descriptor in the ring */ + if (srng->flags & HAL_SRNG_FLAGS_CACHED) + ath11k_hal_srng_prefetch_desc(ab, srng); return desc; } @@ -775,11 +797,16 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); - if (srng->ring_dir == HAL_SRNG_DIR_SRC) + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; - else + } else { srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; + + /* Try to prefetch the next descriptor in the ring */ + if (srng->flags & HAL_SRNG_FLAGS_CACHED) + ath11k_hal_srng_prefetch_desc(ab, srng); + } } /* Update cached ring head/tail pointers to HW. 
ath11k_hal_srng_access_begin() diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 35ed3a14e200..0f4f9ce74354 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -513,6 +513,7 @@ enum hal_srng_dir { #define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020 #define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000 #define HAL_SRNG_FLAGS_MSI_INTR 0x00020000 +#define HAL_SRNG_FLAGS_CACHED 0x20000000 #define HAL_SRNG_FLAGS_LMAC_RING 0x80000000 #define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1) diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c index 54b1d34724d7..6913b7494b9b 100644 --- a/drivers/net/wireless/ath/ath11k/htc.c +++ b/drivers/net/wireless/ath/ath11k/htc.c @@ -81,6 +81,8 @@ int ath11k_htc_send(struct ath11k_htc *htc, struct ath11k_base *ab = htc->ab; int credits = 0; int ret; + bool credit_flow_enabled = (ab->hw_params.credit_flow && + ep->tx_credit_flow_enabled); if (eid >= ATH11K_HTC_EP_COUNT) { ath11k_warn(ab, "Invalid endpoint id: %d\n", eid); @@ -89,7 +91,7 @@ int ath11k_htc_send(struct ath11k_htc *htc, skb_push(skb, sizeof(struct ath11k_htc_hdr)); - if (ep->tx_credit_flow_enabled) { + if (credit_flow_enabled) { credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); spin_lock_bh(&htc->tx_lock); if (ep->tx_credits < credits) { @@ -126,7 +128,7 @@ int ath11k_htc_send(struct ath11k_htc *htc, err_unmap: dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); err_credits: - if (ep->tx_credit_flow_enabled) { + if (credit_flow_enabled) { spin_lock_bh(&htc->tx_lock); ep->tx_credits += credits; ath11k_dbg(ab, ATH11K_DBG_HTC, @@ -203,23 +205,25 @@ static int ath11k_htc_process_trailer(struct ath11k_htc *htc, break; } - switch (record->hdr.id) { - case ATH11K_HTC_RECORD_CREDITS: - len = sizeof(struct ath11k_htc_credit_report); - if (record->hdr.len < len) { - ath11k_warn(ab, "Credit report too long\n"); - status = -EINVAL; + if (ab->hw_params.credit_flow) { + switch (record->hdr.id) { + case ATH11K_HTC_RECORD_CREDITS: + len = sizeof(struct ath11k_htc_credit_report); + if (record->hdr.len < len) { + ath11k_warn(ab, "Credit report too long\n"); + status = -EINVAL; + break; + } + ath11k_htc_process_credit_report(htc, + record->credit_report, + record->hdr.len, + src_eid); + break; + default: + ath11k_warn(ab, "Unhandled record: id:%d length:%d\n", + record->hdr.id, record->hdr.len); break; } - ath11k_htc_process_credit_report(htc, - record->credit_report, - record->hdr.len, - src_eid); - break; - default: - ath11k_warn(ab, "Unhandled record: id:%d length:%d\n", - record->hdr.id, record->hdr.len); - break; } if (status) @@ -245,6 +249,29 @@ static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack) complete(&ab->htc_suspend); } +void ath11k_htc_tx_completion_handler(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct ath11k_htc *htc = &ab->htc; + struct ath11k_htc_ep *ep; + void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *); + u8 eid; + + eid = ATH11K_SKB_CB(skb)->eid; + if (eid >= ATH11K_HTC_EP_COUNT) + return; + + ep = &htc->endpoint[eid]; + spin_lock_bh(&htc->tx_lock); + ep_tx_complete = ep->ep_ops.ep_tx_complete; + spin_unlock_bh(&htc->tx_lock); + if (!ep_tx_complete) { + dev_kfree_skb_any(skb); + return; + } + ep_tx_complete(htc->ab, skb); +} + void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, struct sk_buff *skb) { @@ -607,6 +634,11 @@ int ath11k_htc_connect_service(struct ath11k_htc *htc, disable_credit_flow_ctrl = true; 
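/*
 * Editorial sketch, not part of the patch: hw_params.credit_flow gates
 * every credit-accounting path in one place. A condensed model of the
 * decision ath11k_htc_send() makes above; the helper name is
 * illustrative, the two fields are the ones the patch tests:
 */
static bool example_htc_ep_uses_credits(const struct ath11k_base *ab,
					const struct ath11k_htc_ep *ep)
{
	/* Credits are consumed only when the hardware generation supports
	 * credit-based flow control and the endpoint negotiated it at
	 * connect time. When this is false, the connect request below also
	 * sets ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL so firmware
	 * stops sending credit reports for the endpoint.
	 */
	return ab->hw_params.credit_flow && ep->tx_credit_flow_enabled;
}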
} + if (!ab->hw_params.credit_flow) { + flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; + disable_credit_flow_ctrl = true; + } + req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags); req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID, conn_req->service_id); @@ -732,7 +764,10 @@ int ath11k_htc_start(struct ath11k_htc *htc) msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID, ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID); - ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n"); + if (ab->hw_params.credit_flow) + ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n"); + else + msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW; status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb); if (status) { diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h index 6c8a469d7f9d..f429b37cfdf7 100644 --- a/drivers/net/wireless/ath/ath11k/htc.h +++ b/drivers/net/wireless/ath/ath11k/htc.h @@ -83,8 +83,8 @@ enum ath11k_htc_conn_flags { ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1, ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2, ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3, - ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2, - ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3 + ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 0x4, + ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 0x8, }; enum ath11k_htc_conn_svc_status { @@ -116,6 +116,8 @@ struct ath11k_htc_conn_svc_resp { u32 svc_meta_pad; } __packed; +#define ATH11K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1) + struct ath11k_htc_setup_complete_extended { u32 msg_id; u32 flags; @@ -305,5 +307,6 @@ int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid, struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size); void ath11k_htc_rx_completion_handler(struct ath11k_base *ar, struct sk_buff *skb); - +void ath11k_htc_tx_completion_handler(struct ath11k_base *ab, + struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index da35fcf5bc56..2f0b526188e4 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -1061,8 +1061,6 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = { const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = { .tx = { ATH11K_TX_RING_MASK_0, - ATH11K_TX_RING_MASK_1, - ATH11K_TX_RING_MASK_2, }, .rx_mon_status = { 0, 0, 0, 0, diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 19223d36846e..2c9d232ebfed 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -170,12 +170,17 @@ struct ath11k_hw_params { bool supports_monitor; bool supports_shadow_regs; bool idle_ps; + bool supports_sta_ps; bool cold_boot_calib; bool supports_suspend; u32 hal_desc_sz; bool fix_l1ss; + bool credit_flow; u8 max_tx_ring; const struct ath11k_hw_hal_params *hal_params; + bool supports_dynamic_smps_6ghz; + bool alloc_cacheable_memory; + bool wakeup_mhi; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 1cc55602787b..292b2b7eab11 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -775,9 +775,9 @@ static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id, arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); arg.channel.min_power = 0; - arg.channel.max_power = channel->max_power * 2; - 
arg.channel.max_reg_power = channel->max_reg_power * 2; - arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; + arg.channel.max_power = channel->max_power; + arg.channel.max_reg_power = channel->max_reg_power; + arg.channel.max_antenna_gain = channel->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; @@ -1049,6 +1049,83 @@ static int ath11k_mac_monitor_stop(struct ath11k *ar) return 0; } +static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_conf *conf = &ar->hw->conf; + enum wmi_sta_powersave_param param; + enum wmi_sta_ps_mode psmode; + int ret; + int timeout; + bool enable_ps; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->vif->type != NL80211_IFTYPE_STATION) + return 0; + + enable_ps = arvif->ps; + + if (!arvif->is_started) { + /* mac80211 can update vif powersave state while disconnected. + * Firmware doesn't behave nicely and consumes more power than + * necessary if PS is disabled on a non-started vdev. Hence + * force-enable PS for non-running vdevs. + */ + psmode = WMI_STA_PS_MODE_ENABLED; + } else if (enable_ps) { + psmode = WMI_STA_PS_MODE_ENABLED; + param = WMI_STA_PS_PARAM_INACTIVITY_TIME; + + timeout = conf->dynamic_ps_timeout; + if (timeout == 0) { + /* firmware doesn't like 0 */ + timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000; + } + + ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, + timeout); + if (ret) { + ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", + arvif->vdev_id, ret); + return ret; + } + } else { + psmode = WMI_STA_PS_MODE_DISABLED; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d psmode %s\n", + arvif->vdev_id, psmode ? 
"enable" : "disable"); + + ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); + if (ret) { + ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", + psmode, arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath11k_mac_config_ps(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath11k_mac_vif_setup_ps(arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret); + break; + } + } + + return ret; +} + static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed) { struct ath11k *ar = hw->priv; @@ -1137,11 +1214,15 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) arvif->rsnie_present = true; + else + arvif->rsnie_present = false; if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies, (skb_tail_pointer(bcn) - ies))) arvif->wpaie_present = true; + else + arvif->wpaie_present = false; ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); @@ -1154,6 +1235,26 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) return ret; } +void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif) +{ + struct ieee80211_vif *vif = arvif->vif; + + if (!vif->color_change_active && !arvif->bcca_zero_sent) + return; + + if (vif->color_change_active && ieee80211_beacon_cntdwn_is_complete(vif)) { + arvif->bcca_zero_sent = true; + ieee80211_color_change_finish(vif); + return; + } + + arvif->bcca_zero_sent = false; + + if (vif->color_change_active) + ieee80211_beacon_update_cntdwn(vif); + ath11k_mac_setup_bcn_tmpl(arvif); +} + static void ath11k_control_beaconing(struct ath11k_vif *arvif, struct ieee80211_bss_conf *info) { @@ -2397,6 +2498,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, struct ath11k_vif *arvif = (void *)vif->drv_priv; struct peer_assoc_params peer_arg; struct ieee80211_sta *ap_sta; + struct ath11k_peer *peer; + bool is_auth = false; int ret; lockdep_assert_held(&ar->conf_mutex); @@ -2418,6 +2521,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, rcu_read_unlock(); + peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", @@ -2458,13 +2562,22 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); - /* Authorize BSS Peer */ - ret = ath11k_wmi_set_peer_param(ar, arvif->bssid, - arvif->vdev_id, - WMI_PEER_AUTHORIZE, - 1); - if (ret) - ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid); + if (peer && peer->is_authorized) + is_auth = true; + + spin_unlock_bh(&ar->ab->base_lock); + + if (is_auth) { + ret = ath11k_wmi_set_peer_param(ar, arvif->bssid, + arvif->vdev_id, + WMI_PEER_AUTHORIZE, + 1); + if (ret) + ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); + } ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, &bss_conf->he_obss_pd); @@ -2805,10 +2918,17 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, "Set staggered beacon mode for VDEV: %d\n", arvif->vdev_id); - ret = ath11k_mac_setup_bcn_tmpl(arvif); - if (ret) - ath11k_warn(ar->ab, "failed to update bcn template: %d\n", - ret); + if 
(!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) { + ret = ath11k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath11k_warn(ar->ab, "failed to update bcn template: %d\n", + ret); + } + + if (arvif->bcca_zero_sent) + arvif->do_not_send_tmpl = true; + else + arvif->do_not_send_tmpl = false; } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { @@ -2942,6 +3062,16 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, ath11k_mac_txpower_recalc(ar); } + if (changed & BSS_CHANGED_PS && + ar->ab->hw_params.supports_sta_ps) { + arvif->ps = vif->bss_conf.ps; + + ret = ath11k_mac_config_ps(ar); + if (ret) + ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n", + arvif->vdev_id, ret); + } + if (changed & BSS_CHANGED_MCAST_RATE && !ath11k_mac_vif_chan(arvif->vif, &def)) { band = def.chan->band; @@ -3009,6 +3139,25 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", arvif->vdev_id, ret); + + param_id = WMI_VDEV_PARAM_BSS_COLOR; + if (info->he_bss_color.enabled) + param_value = info->he_bss_color.color << + IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET; + else + param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED; + + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, + param_value); + if (ret) + ath11k_warn(ar->ab, + "failed to set bss color param on vdev %i: %d\n", + arvif->vdev_id, ret); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "bss color param 0x%x set on vdev %i\n", + param_value, arvif->vdev_id); } else if (vif->type == NL80211_IFTYPE_STATION) { ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar, arvif->vdev_id, @@ -3316,9 +3465,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif, return 0; if (cmd == DISABLE_KEY) { - /* TODO: Check if FW expects value other than NONE for del */ - /* arg.key_cipher = WMI_CIPHER_NONE; */ - arg.key_len = 0; + arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; goto install; } @@ -3685,6 +3832,7 @@ static int ath11k_station_assoc(struct ath11k *ar, ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", @@ -3824,11 +3972,27 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) ath11k_mac_max_he_nss(he_mcs_mask))); if (changed & IEEE80211_RC_BW_CHANGED) { - err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, - WMI_PEER_CHWIDTH, bw); - if (err) - ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", - sta->addr, bw, err); + /* Send peer assoc command before set peer bandwidth param to + * avoid the mismatch between the peer phymode and the peer + * bandwidth. 
+ */ + ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true); + + peer_arg.is_assoc = false; + err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (err) { + ath11k_warn(ar->ab, "failed to send peer assoc for STA %pM vdev %i: %d\n", + sta->addr, arvif->vdev_id, err); + } else if (wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { + err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + WMI_PEER_CHWIDTH, bw); + if (err) + ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", + sta->addr, bw, err); + } else { + ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", + sta->addr, arvif->vdev_id); + } } if (changed & IEEE80211_RC_NSS_CHANGED) { @@ -3896,6 +4060,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true); + peer_arg.is_assoc = false; err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (err) ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", @@ -4095,6 +4260,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, new_state == IEEE80211_STA_NOTEXIST)) { ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); + if (ar->ab->hw_params.vdev_start_delay && + vif->type == NL80211_IFTYPE_STATION) + goto free; + ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n", @@ -4116,6 +4285,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, } spin_unlock_bh(&ar->ab->base_lock); +free: kfree(arsta->tx_stats); arsta->tx_stats = NULL; @@ -4131,6 +4301,34 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ath11k_warn(ar->ab, "Failed to associate station: %pM\n", sta->addr); } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTHORIZED) { + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); + if (peer) + peer->is_authorized = true; + + spin_unlock_bh(&ar->ab->base_lock); + + if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { + ret = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + WMI_PEER_AUTHORIZE, + 1); + if (ret) + ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", + sta->addr, arvif->vdev_id, ret); + } + } else if (old_state == IEEE80211_STA_AUTHORIZED && + new_state == IEEE80211_STA_ASSOC) { + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); + if (peer) + peer->is_authorized = false; + + spin_unlock_bh(&ar->ab->base_lock); + } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || @@ -4561,6 +4759,10 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask, vht_cap.vht_supported = 1; vht_cap.cap = ar->pdev->cap.vht_cap; + if (ar->pdev->cap.nss_ratio_enabled) + vht_cap.vht_mcs.tx_highest |= + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); + ath11k_set_vht_txbf_cap(ar, &vht_cap.cap); rxmcs_map = 0; @@ -5048,13 +5250,15 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) arvif = ath11k_vif_to_arvif(skb_cb->vif); if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && arvif->is_started) { + atomic_inc(&ar->num_pending_mgmt_tx); ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { + if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0) + WARN_ON_ONCE(1); + ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", arvif->vdev_id, 
ret); ieee80211_free_txskb(ar->hw, skb); - } else { - atomic_inc(&ar->num_pending_mgmt_tx); } } else { ath11k_warn(ar->ab, @@ -5138,7 +5342,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, arsta = (struct ath11k_sta *)control->sta->drv_priv; ret = ath11k_dp_tx(ar, arvif, arsta, skb); - if (ret) { + if (unlikely(ret)) { ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } @@ -5484,7 +5688,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, u32 param_id, param_value; u16 nss; int i; - int ret; + int ret, fbret; int bit; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; @@ -5638,7 +5842,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, goto err_peer_del; } - ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false); + ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, + WMI_STA_PS_MODE_DISABLED); if (ret) { ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n", arvif->vdev_id, ret); @@ -5686,17 +5891,17 @@ err_peer_del: if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { reinit_completion(&ar->peer_delete_done); - ret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, - arvif->vdev_id); - if (ret) { + fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, + arvif->vdev_id); + if (fbret) { ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", arvif->vdev_id, vif->addr); goto err; } - ret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id, - vif->addr); - if (ret) + fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id, + vif->addr); + if (fbret) goto err; ar->num_peers--; @@ -5831,7 +6036,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; @@ -5969,9 +6173,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, ath11k_phymodes[chandef->chan->band][chandef->width]; arg.channel.min_power = 0; - arg.channel.max_power = chandef->chan->max_power * 2; - arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; - arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; + arg.channel.max_power = chandef->chan->max_power; + arg.channel.max_reg_power = chandef->chan->max_reg_power; + arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; @@ -6468,6 +6672,19 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_started = false; if (ab->hw_params.vdev_start_delay && + arvif->vdev_type == WMI_VDEV_TYPE_STA) { + ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid); + if (ret) + ath11k_warn(ar->ab, + "failed to delete peer %pM for vdev %d: %d\n", + arvif->bssid, arvif->vdev_id, ret); + else + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac removed peer %pM vdev %d after vdev stop\n", + arvif->bssid, arvif->vdev_id); + } + + if (ab->hw_params.vdev_start_delay && arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) ath11k_wmi_vdev_down(ar, arvif->vdev_id); @@ -7277,21 +7494,20 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, sinfo->tx_duration = arsta->tx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); - if (!arsta->txrate.legacy && !arsta->txrate.nss) - return; - - if (arsta->txrate.legacy) { - sinfo->txrate.legacy = arsta->txrate.legacy; - } else { - sinfo->txrate.mcs = arsta->txrate.mcs; - sinfo->txrate.nss = arsta->txrate.nss; - sinfo->txrate.bw = arsta->txrate.bw; - 
sinfo->txrate.he_gi = arsta->txrate.he_gi; - sinfo->txrate.he_dcm = arsta->txrate.he_dcm; - sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; + if (arsta->txrate.legacy || arsta->txrate.nss) { + if (arsta->txrate.legacy) { + sinfo->txrate.legacy = arsta->txrate.legacy; + } else { + sinfo->txrate.mcs = arsta->txrate.mcs; + sinfo->txrate.nss = arsta->txrate.nss; + sinfo->txrate.bw = arsta->txrate.bw; + sinfo->txrate.he_gi = arsta->txrate.he_gi; + sinfo->txrate.he_dcm = arsta->txrate.he_dcm; + sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; + } + sinfo->txrate.flags = arsta->txrate.flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } - sinfo->txrate.flags = arsta->txrate.flags; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); /* TODO: Use real NF instead of default one. */ sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR; @@ -7672,7 +7888,8 @@ static int __ath11k_mac_register(struct ath11k *ar) * for each band for a dual band capable radio. It will be tricky to * handle it when the ht capability is different for each band. */ - if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz) + if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || + (ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz)) ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; @@ -7703,6 +7920,9 @@ static int __ath11k_mac_register(struct ath11k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); + if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_BSS_COLOR); ar->hw->wiphy->cipher_suites = cipher_suites; ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 254ca4acc8e8..f6f37e8c8c6a 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -155,4 +155,5 @@ enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw b enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher); void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); +void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif); #endif diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 3d353e7c9d5c..c2af8184e4a2 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -182,7 +182,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) /* for offset beyond BAR + 4K - 32, may * need to wake up MHI to access. */ - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); @@ -206,7 +207,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) } } - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); } @@ -219,7 +221,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) /* for offset beyond BAR + 4K - 32, may * need to wake up MHI to access.
*/ - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); @@ -243,7 +246,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) } } - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); @@ -1251,6 +1255,15 @@ static int ath11k_pci_probe(struct pci_dev *pdev, goto err_free_core; } + ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + ab->id.vendor = pdev->vendor; + ab->id.device = pdev->device; + ab->id.subsystem_vendor = pdev->subsystem_vendor; + ab->id.subsystem_device = pdev->subsystem_device; + switch (pci_dev->device) { case QCA6390_DEVICE_ID: ath11k_pci_read_hw_version(ab, &soc_hw_version_major, @@ -1273,6 +1286,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; case WCN6855_DEVICE_ID: + ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD; ath11k_pci_read_hw_version(ab, &soc_hw_version_major, &soc_hw_version_minor); switch (soc_hw_version_major) { diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h index 619db001be8e..63fe5665badf 100644 --- a/drivers/net/wireless/ath/ath11k/peer.h +++ b/drivers/net/wireless/ath/ath11k/peer.h @@ -28,6 +28,7 @@ struct ath11k_peer { u8 ucast_keyidx; u16 sec_type; u16 sec_type_grp; + bool is_authorized; }; void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index fa73118de6db..25eb22cbeaeb 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1586,7 +1586,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) { struct qmi_wlanfw_host_cap_req_msg_v01 req; struct qmi_wlanfw_host_cap_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); @@ -1640,6 +1640,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_host_cap_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send host capability request: %d\n", ret); goto out; } @@ -1705,6 +1706,7 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send indication register request: %d\n", ret); goto out; @@ -1734,7 +1736,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) { struct qmi_wlanfw_respond_mem_req_msg_v01 *req; struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0, i; bool delayed; @@ -1783,6 +1785,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_respond_mem_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to respond qmi memory request: %d\n", ret); goto out; @@ -1911,7 +1914,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) { struct qmi_wlanfw_cap_req_msg_v01 req; struct 
qmi_wlanfw_cap_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; int r; @@ -1930,6 +1933,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_cap_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send qmi cap request: %d\n", ret); goto out; @@ -2000,7 +2004,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, { struct qmi_wlanfw_bdf_download_req_msg_v01 *req; struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; const u8 *temp = data; void __iomem *bdf_addr = NULL; int ret; @@ -2245,7 +2249,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; struct qmi_wlanfw_m3_info_req_msg_v01 req; struct qmi_wlanfw_m3_info_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); @@ -2277,6 +2281,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, qmi_wlanfw_m3_info_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send m3 information request: %d\n", ret); goto out; @@ -2303,7 +2308,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, { struct qmi_wlanfw_wlan_mode_req_msg_v01 req; struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); @@ -2325,6 +2330,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n", mode, ret); goto out; @@ -2358,7 +2364,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; struct ce_pipe_config *ce_cfg; struct service_to_pipe *svc_cfg; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0, pipe_num; ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; @@ -2419,6 +2425,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan config request: %d\n", ret); goto out; diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index a66b5bdd2167..8606170ba80d 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -456,6 +456,9 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) { u16 bw; + if (end_freq <= start_freq) + return 0; + bw = end_freq - start_freq; bw = min_t(u16, bw, max_bw); @@ -463,8 +466,10 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) bw = 80; else if (bw >= 40 && bw < 80) bw = 40; - else if (bw < 40) + else if (bw >= 20 && bw < 40) bw = 20; + else + bw = 0; return bw; } @@ -488,73 +493,77 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab, struct cur_reg_rule *reg_rule, u8 *rule_idx, u32 flags, u16 max_bw) { + u32 start_freq; u32 end_freq; u16 bw; u8 i; i = *rule_idx; + /* there might be situations when even the input rule must be dropped */ + i--; + + /* frequencies below weather radar */ bw = ath11k_reg_adjust_bw(reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, max_bw); + if (bw > 0) { + i++; 
- ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq, - ETSI_WEATHER_RADAR_BAND_LOW, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, + reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH) - end_freq = ETSI_WEATHER_RADAR_BAND_HIGH; - else - end_freq = reg_rule->end_freq; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } - bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - max_bw); + /* weather radar frequencies */ + start_freq = max_t(u32, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW); + end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH); - i++; + bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw); + if (bw > 0) { + i++; - ath11k_reg_update_rule(regd->reg_rules + i, - ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, start_freq, + end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, flags); - regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; + regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (end_freq == reg_rule->end_freq) { - regd->n_reg_rules--; - *rule_idx = i; - return; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, start_freq, end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, flags); } + /* frequencies above weather radar */ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, max_bw); + if (bw > 0) { + i++; - i++; - - ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH, - reg_rule->end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, + ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. 
(%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } *rule_idx = i; } diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c index f0cc49ba0387..6620650d7845 100644 --- a/drivers/net/wireless/ath/ath11k/trace.c +++ b/drivers/net/wireless/ath/ath11k/trace.c @@ -7,3 +7,4 @@ #define CREATE_TRACE_POINTS #include "trace.h" +EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg); diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h index 25d18e9d5b0b..02003dc4207d 100644 --- a/drivers/net/wireless/ath/ath11k/trace.h +++ b/drivers/net/wireless/ath/ath11k/trace.h @@ -14,12 +14,24 @@ #if !defined(CONFIG_ATH11K_TRACING) #undef TRACE_EVENT #define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} \ +static inline bool trace_##name##_enabled(void) \ +{ \ + return false; \ +} + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...) \ static inline void trace_ ## name(proto) {} #endif /* !CONFIG_ATH11K_TRACING || __CHECKER__ */ #undef TRACE_SYSTEM #define TRACE_SYSTEM ath11k +#define ATH11K_MSG_MAX 400 + TRACE_EVENT(ath11k_htt_pktlog, TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len, u32 pktlog_checksum), @@ -108,6 +120,166 @@ TRACE_EVENT(ath11k_htt_rxdesc, ) ); +DECLARE_EVENT_CLASS(ath11k_log_event, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf), + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __dynamic_array(char, msg, ATH11K_MSG_MAX) + ), + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + ATH11K_MSG_MAX, + vaf->fmt, + *vaf->va) >= ATH11K_MSG_MAX); + ), + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) +); + +DEFINE_EVENT(ath11k_log_event, ath11k_log_err, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf) +); + +DEFINE_EVENT(ath11k_log_event, ath11k_log_warn, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf) +); + +DEFINE_EVENT(ath11k_log_event, ath11k_log_info, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf) +); + +TRACE_EVENT(ath11k_wmi_cmd, + TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len), + + TP_ARGS(ab, id, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s id %d len %zu", + __get_str(driver), + __get_str(device), + __entry->id, + __entry->buf_len + ) +); + +TRACE_EVENT(ath11k_wmi_event, + TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len), + + TP_ARGS(ab, id, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + 
__assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s id %d len %zu", + __get_str(driver), + __get_str(device), + __entry->id, + __entry->buf_len + ) +); + +TRACE_EVENT(ath11k_log_dbg, + TP_PROTO(struct ath11k_base *ab, unsigned int level, struct va_format *vaf), + + TP_ARGS(ab, level, vaf), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(unsigned int, level) + __dynamic_array(char, msg, ATH11K_MSG_MAX) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __entry->level = level; + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + ATH11K_MSG_MAX, vaf->fmt, + *vaf->va) >= ATH11K_MSG_MAX); + ), + + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) +); + +TRACE_EVENT(ath11k_log_dbg_dump, + TP_PROTO(struct ath11k_base *ab, const char *msg, const char *prefix, + const void *buf, size_t buf_len), + + TP_ARGS(ab, msg, prefix, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __string(msg, msg) + __string(prefix, prefix) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __assign_str(msg, msg); + __assign_str(prefix, prefix); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s %s/%s\n", + __get_str(driver), + __get_str(device), + __get_str(prefix), + __get_str(msg) + ) +); #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ /* we don't want to use include/trace/events */ diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 5ae2ef4680d6..614b2f6bcc8e 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -128,6 +128,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, + [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = { + .min_len = sizeof(struct wmi_obss_color_collision_event) }, }; #define PRIMAP(_hw_mode_) \ @@ -249,6 +251,8 @@ static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buf cmd_hdr = (struct wmi_cmd_hdr *)skb->data; cmd_hdr->cmd_id = cmd; + trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len); + memset(skb_cb, 0, sizeof(*skb_cb)); ret = ath11k_htc_send(&ab->htc, wmi->eid, skb); @@ -267,21 +271,39 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, { struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab; int ret = -EOPNOTSUPP; + struct ath11k_base *ab = wmi_sc->ab; might_sleep(); - wait_event_timeout(wmi_sc->tx_credits_wq, ({ - ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); + if (ab->hw_params.credit_flow) { + wait_event_timeout(wmi_sc->tx_credits_wq, ({ + ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); + + if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, + &wmi_sc->ab->dev_flags)) + ret = -ESHUTDOWN; + + (ret != -EAGAIN); + }), WMI_SEND_TIMEOUT_HZ); + } else { + wait_event_timeout(wmi->tx_ce_desc_wq, ({ + ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); - if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, 
&wmi_sc->ab->dev_flags)) - ret = -ESHUTDOWN; + if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, + &wmi_sc->ab->dev_flags)) + ret = -ESHUTDOWN; - (ret != -EAGAIN); - }), WMI_SEND_TIMEOUT_HZ); + (ret != -ENOBUFS); + }), WMI_SEND_TIMEOUT_HZ); + } if (ret == -EAGAIN) ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + if (ret == -ENOBUFS) + ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n", + cmd_id); + return ret; } @@ -1244,7 +1266,8 @@ int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, return ret; } -int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable) +int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, + enum wmi_sta_ps_mode psmode) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_ps_mode_cmd *cmd; @@ -1259,7 +1282,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable) cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; - cmd->sta_ps_mode = enable; + cmd->sta_ps_mode = psmode; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID); if (ret) { @@ -1269,7 +1292,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable) ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev set psmode %d vdev id %d\n", - enable, vdev_id); + psmode, vdev_id); return ret; } @@ -1612,6 +1635,15 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, void *ptr; int ret, len; size_t aligned_len = roundup(bcn->len, 4); + struct ieee80211_vif *vif; + struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id); + + if (!arvif) { + ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id); + return -EINVAL; + } + + vif = arvif->vif; len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; @@ -1624,8 +1656,12 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->tim_ie_offset = offs->tim_offset; - cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; - cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; + + if (vif->csa_active) { + cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; + cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; + } + cmd->buf_len = bcn->len; ptr = skb->data + sizeof(*cmd); @@ -1689,7 +1725,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar, tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, key_len_aligned); - memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); + if (arg->key_data) + memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); if (ret) { @@ -1762,7 +1799,7 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, cmd->peer_flags |= WMI_PEER_AUTH; if (param->need_ptk_4_way) { cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; - if (!hw_crypto_disabled) + if (!hw_crypto_disabled && param->is_assoc) cmd->peer_flags &= ~WMI_PEER_AUTH; } if (param->need_gtk_2_way) @@ -2386,6 +2423,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, tchan_info->reg_class_id); *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, tchan_info->antennamax); + *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, + tchan_info->maxregpower); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", @@ -3428,6 
+3467,53 @@ int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, } static void +ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + const void **tb; + const struct wmi_obss_color_collision_event *ev; + struct ath11k_vif *arvif; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch obss color collision ev"); + goto exit; + } + + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); + if (!arvif) { + ath11k_warn(ab, "failed to find arvif with vdev id %d in obss_color_collision_event\n", + ev->vdev_id); + goto exit; + } + + switch (ev->evt_type) { + case WMI_BSS_COLOR_COLLISION_DETECTION: + ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap); + ath11k_dbg(ab, ATH11K_DBG_WMI, + "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n", + ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); + break; + case WMI_BSS_COLOR_COLLISION_DISABLE: + case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: + case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: + break; + default: + ath11k_warn(ab, "received unknown obss color collision detection event\n"); + } + +exit: + kfree(tb); +} + +static void ath11k_fill_band_to_mac_param(struct ath11k_base *soc, struct wmi_host_pdev_band_to_mac *band_to_mac) { @@ -5813,7 +5899,30 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab, struct sk_buff *skb) { + struct ath11k_pdev_wmi *wmi = NULL; + u32 i; + u8 wmi_ep_count; + u8 eid; + + eid = ATH11K_SKB_CB(skb)->eid; dev_kfree_skb(skb); + + if (eid >= ATH11K_HTC_EP_COUNT) + return; + + wmi_ep_count = ab->htc.wmi_ep_count; + if (wmi_ep_count > ab->hw_params.max_radios) + return; + + for (i = 0; i < ab->htc.wmi_ep_count; i++) { + if (ab->wmi_ab.wmi[i].eid == eid) { + wmi = &ab->wmi_ab.wmi[i]; + break; + } + } + + if (wmi) + wake_up(&wmi->tx_ce_desc_wq); } static bool ath11k_reg_is_world_alpha(char *alpha) @@ -6111,6 +6220,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) { + struct ath11k_vif *arvif; u32 vdev_id, tx_status; if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, @@ -6118,6 +6228,14 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s ath11k_warn(ab, "failed to extract bcn tx status"); return; } + + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id); + if (!arvif) { + ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status", + vdev_id); + return; + } + ath11k_mac_bcn_tx_event(arvif); } static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb) @@ -6398,6 +6516,7 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff struct ieee80211_sta *sta; struct ath11k_peer *peer; struct ath11k *ar; + u32 vdev_id; if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { ath11k_warn(ab, "failed to extract peer sta kickout event"); @@ -6413,10 +6532,15 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff if (!peer) { ath11k_warn(ab, "peer not found %pM\n", arg.mac_addr); + spin_unlock_bh(&ab->base_lock); goto exit; } - ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); + vdev_id = peer->vdev_id; + +
spin_unlock_bh(&ab->base_lock); + + ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d", peer->vdev_id); @@ -6437,7 +6561,6 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff ieee80211_report_low_ack(sta, 10); exit: - spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); } @@ -7054,6 +7177,8 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id)); + trace_ath11k_wmi_event(ab, id, skb->data, skb->len); + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) goto out; @@ -7138,6 +7263,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: ath11k_probe_resp_tx_status_event(ab, skb); break; + case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: + ath11k_wmi_obss_color_collision_event(ab, skb); + break; /* add Unsupported events here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: @@ -7199,6 +7327,7 @@ static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab, ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; + init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq); return 0; } diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 0584e68e7593..4eb06cb7f883 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -774,6 +774,8 @@ enum wmi_tlv_event_id { WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL), WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL), WMI_SAP_OFL_DEL_STA_EVENTID, + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID = + WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL), WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB), WMI_OCB_GET_TSF_TIMER_RESP_EVENTID, WMI_DCC_GET_STATS_RESP_EVENTID, @@ -2522,6 +2524,7 @@ struct ath11k_pdev_wmi { enum ath11k_htc_ep_id eid; const struct wmi_peer_flags_map *peer_flags; u32 rx_decap_mode; + wait_queue_head_t tx_ce_desc_wq; }; struct vdev_create_params { @@ -3617,6 +3620,7 @@ struct peer_assoc_params { u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET]; bool twt_responder; bool twt_requester; + bool is_assoc; struct ath11k_ppe_threshold peer_ppet; }; @@ -4914,6 +4918,13 @@ struct wmi_pdev_obss_pd_bitmap_cmd { #define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000 #define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000 +enum wmi_bss_color_collision { + WMI_BSS_COLOR_COLLISION_DISABLE = 0, + WMI_BSS_COLOR_COLLISION_DETECTION, + WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY, + WMI_BSS_COLOR_FREE_SLOT_AVAILABLE, +}; + struct wmi_obss_color_collision_cfg_params_cmd { u32 tlv_header; u32 vdev_id; @@ -4931,6 +4942,12 @@ struct wmi_bss_color_change_enable_params_cmd { u32 enable; } __packed; +struct wmi_obss_color_collision_event { + u32 vdev_id; + u32 evt_type; + u64 obss_color_bitmap; +} __packed; + #define ATH11K_IPV4_TH_SEED_SIZE 5 #define ATH11K_IPV6_TH_SEED_SIZE 11 @@ -5351,7 +5368,8 @@ int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr, u32 vdev_id, u32 param_id, u32 param_val); int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, u32 param_value, u8 pdev_id); -int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable); +int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, 
int vdev_id, + enum wmi_sta_ps_mode psmode); int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab); int ath11k_wmi_cmd_init(struct ath11k_base *ab); int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index ce9a0a53771e..fba5a847c3bb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -120,7 +120,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, AR_ISR_TXEOL); } - ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); + ah->intr_txqs = MS(s0_s, AR_ISR_S0_QCU_TXOK); ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 7e27a06e5df1..dc24da1ff00b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1005,24 +1005,20 @@ static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah, int i, int nmeasurement) { struct ath_common *common = ath9k_hw_common(ah); - int im, ix, iy, temp; + int im, ix, iy; for (im = 0; im < nmeasurement; im++) { for (ix = 0; ix < MAXIQCAL - 1; ix++) { for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) { if (coeff->mag_coeff[i][im][iy] < coeff->mag_coeff[i][im][ix]) { - temp = coeff->mag_coeff[i][im][ix]; - coeff->mag_coeff[i][im][ix] = - coeff->mag_coeff[i][im][iy]; - coeff->mag_coeff[i][im][iy] = temp; + swap(coeff->mag_coeff[i][im][ix], + coeff->mag_coeff[i][im][iy]); } if (coeff->phs_coeff[i][im][iy] < coeff->phs_coeff[i][im][ix]) { - temp = coeff->phs_coeff[i][im][ix]; - coeff->phs_coeff[i][im][ix] = - coeff->phs_coeff[i][im][iy]; - coeff->phs_coeff[i][im][iy] = temp; + swap(coeff->phs_coeff[i][im][ix], + coeff->phs_coeff[i][im][iy]); } } } diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index aff04ef66266..4e9e13941c8f 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -272,6 +272,21 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) return 0; } +static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) +{ + int reg_data = 0; + + wcn36xx_dxe_read_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + ®_data); + + reg_data &= ~wcn_ch; + + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + (int)reg_data); +} + static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl, gfp_t gfp) @@ -834,6 +849,53 @@ unlock: return ret; } +static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch) +{ + unsigned long flags; + struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start; + struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb; + bool ret = true; + + spin_lock_irqsave(&ch->lock, flags); + + /* Loop through ring buffer looking for nonempty entries. */ + ctl_bd_start = ch->head_blk_ctl; + ctl_bd = ctl_bd_start; + ctl_skb_start = ctl_bd_start->next; + ctl_skb = ctl_skb_start; + do { + if (ctl_skb->skb) { + ret = false; + goto unlock; + } + ctl_bd = ctl_skb->next; + ctl_skb = ctl_bd->next; + } while (ctl_skb != ctl_skb_start); + +unlock: + spin_unlock_irqrestore(&ch->lock, flags); + return ret; +} + +int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn) +{ + int i = 0; + + /* Called with mac80211 queues stopped. Wait for empty HW queues. 
*/ + do { + if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) && + _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) { + return 0; + } + /* This ieee80211_ops callback is specifically allowed to + * sleep. + */ + usleep_range(1000, 1100); + } while (++i < 100); + + return -EBUSY; +} + int wcn36xx_dxe_init(struct wcn36xx *wcn) { int reg_data = 0, ret; @@ -869,7 +931,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_WQ_TX_L); wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, ®_data); - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); /***************************************/ /* Init descriptors for TX HIGH channel */ @@ -893,9 +954,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, ®_data); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); - /***************************************/ /* Init descriptors for RX LOW channel */ /***************************************/ @@ -905,7 +963,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) goto out_err_rxl_ch; } - /* For RX we need to preallocated buffers */ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch); @@ -928,9 +985,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_L, WCN36XX_DXE_CH_DEFAULT_CTL_RX_L); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); - /***************************************/ /* Init descriptors for RX HIGH channel */ /***************************************/ @@ -962,15 +1016,18 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_H, WCN36XX_DXE_CH_DEFAULT_CTL_RX_H); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); - ret = wcn36xx_dxe_request_irqs(wcn); if (ret < 0) goto out_err_irq; timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0); + /* Enable channel interrupts */ + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + return 0; out_err_irq: @@ -987,6 +1044,14 @@ out_err_txh_ch: void wcn36xx_dxe_deinit(struct wcn36xx *wcn) { + int reg_data = 0; + + /* Disable channel interrupts */ + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + free_irq(wcn->tx_irq, wcn); free_irq(wcn->rx_irq, wcn); del_timer(&wcn->tx_ack_timer); @@ -996,6 +1061,15 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn) wcn->tx_ack_skb = NULL; } + /* Put the DXE block into reset before freeing memory */ + reg_data = WCN36XX_DXE_REG_RESET; + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data); + wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch); wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch); + + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch); } diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h index 31b81b7547a3..26a31edf52e9 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.h +++ b/drivers/net/wireless/ath/wcn36xx/dxe.h @@ -466,5 +466,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx 
*wcn, struct wcn36xx_tx_bd *bd, struct sk_buff *skb, bool is_low); +int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn); void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status); #endif /* _DXE_H_ */ diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index b04533bbc3a4..0110f5062296 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -402,6 +402,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch) static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) { struct wcn36xx *wcn = hw->priv; + int ret; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); @@ -417,17 +418,31 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) * want to receive/transmit regular data packets, then * simply stop the scan session and exit PS mode. */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); - wcn->sw_scan_channel = 0; + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } } else if (wcn->sw_scan) { /* A scan is ongoing, do not change the operating * channel, but start a scan session on the channel. */ - wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (!wcn->sw_scan_init) { + /* This can fail if we are unable to notify the + * operating channel. + */ + ret = wcn36xx_smd_init_scan(wcn, + HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + if (ret) { + mutex_unlock(&wcn->conf_mutex); + return -EIO; + } + } wcn36xx_smd_start_scan(wcn, ch); - wcn->sw_scan_channel = ch; } else { wcn36xx_change_opchannel(wcn, ch); } @@ -707,6 +722,8 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, struct wcn36xx *wcn = hw->priv; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); + wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_start"); + wcn->sw_scan = true; wcn->sw_scan_vif = vif; wcn->sw_scan_channel = 0; @@ -721,8 +738,15 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, { struct wcn36xx *wcn = hw->priv; + wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_complete"); + /* ensure that any scan session is finished */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } wcn->sw_scan = false; wcn->sw_scan_opchannel = 0; } @@ -1277,6 +1301,16 @@ static void wcn36xx_ipv6_addr_change(struct ieee80211_hw *hw, } #endif +static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct wcn36xx *wcn = hw->priv; + + if (wcn36xx_dxe_tx_flush(wcn)) { + wcn36xx_err("Failed to flush hardware tx queues\n"); + } +} + static const struct ieee80211_ops wcn36xx_ops = { .start = wcn36xx_start, .stop = wcn36xx_stop, @@ -1304,6 +1338,7 @@ static const struct ieee80211_ops wcn36xx_ops = { #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = wcn36xx_ipv6_addr_change, #endif + .flush = wcn36xx_flush, CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd) }; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index ed45e2cf039b..d3285a504429 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -722,6 +722,7 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, 
enum wcn36xx_hal_sys_mode mode, wcn36xx_err("hal_init_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = true; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -752,6 +753,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_start_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = scan_channel; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -782,6 +784,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_end_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = 0; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -823,6 +826,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, wcn36xx_err("hal_finish_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = false; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -2732,7 +2736,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", tmp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); } return 0; } @@ -2747,7 +2751,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", rsp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); return 0; } } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 75951ccbc840..dd58dde8c836 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -272,7 +272,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) const struct wcn36xx_rate *rate; struct ieee80211_hdr *hdr; struct wcn36xx_rx_bd *bd; - struct ieee80211_supported_band *sband; u16 fc, sn; /* @@ -314,8 +313,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) fc = __le16_to_cpu(hdr->frame_control); sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); - status.freq = WCN36XX_CENTER_FREQ(wcn); - status.band = WCN36XX_BAND(wcn); status.mactime = 10; status.signal = -get_rssi0(bd); status.antenna = 1; @@ -327,18 +324,36 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); + if (bd->scan_learn) { + /* If packet originates from hardware scanning, extract the + * band/channel from bd descriptor.
+ */ + u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; + + if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { + status.band = NL80211_BAND_5GHZ; + status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], + status.band); + } else { + status.band = NL80211_BAND_2GHZ; + status.freq = ieee80211_channel_to_frequency(hwch, status.band); + } + } else { + status.band = WCN36XX_BAND(wcn); + status.freq = WCN36XX_CENTER_FREQ(wcn); + } + if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) { rate = &wcn36xx_rate_table[bd->rate_id]; status.encoding = rate->encoding; status.enc_flags = rate->encoding_flags; status.bw = rate->bw; status.rate_idx = rate->mcs_or_legacy_index; - sband = wcn->hw->wiphy->bands[status.band]; status.nss = 1; if (status.band == NL80211_BAND_5GHZ && status.encoding == RX_ENC_LEGACY && - status.rate_idx >= sband->n_bitrates) { + status.rate_idx >= 4) { /* no dsss rates in 5GHz rates table */ status.rate_idx -= 4; } @@ -353,22 +368,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) ieee80211_is_probe_resp(hdr->frame_control)) status.boottime_ns = ktime_get_boottime_ns(); - if (bd->scan_learn) { - /* If packet originates from hardware scanning, extract the - * band/channel from bd descriptor. - */ - u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; - - if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { - status.band = NL80211_BAND_5GHZ; - status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], - status.band); - } else { - status.band = NL80211_BAND_2GHZ; - status.freq = ieee80211_channel_to_frequency(hwch, status.band); - } - } - memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); if (ieee80211_is_beacon(hdr->frame_control)) { diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 1c8d918137da..fbd0558c2c19 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -248,6 +248,7 @@ struct wcn36xx { struct cfg80211_scan_request *scan_req; bool sw_scan; u8 sw_scan_opchannel; + bool sw_scan_init; u8 sw_scan_channel; struct ieee80211_vif *sw_scan_vif; struct mutex scan_lock; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index fb727778312c..1679361f187b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3901,6 +3901,24 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, cfg->wowl.active = true; } +static int brcmf_keepalive_start(struct brcmf_if *ifp, unsigned int interval) +{ + struct brcmf_mkeep_alive_pkt_le kalive = {0}; + int ret = 0; + + /* Configure Null function/data keepalive */ + kalive.version = cpu_to_le16(1); + kalive.period_msec = cpu_to_le32(interval * MSEC_PER_SEC); + kalive.len_bytes = cpu_to_le16(0); + kalive.keep_alive_id = 0; + + ret = brcmf_fil_iovar_data_set(ifp, "mkeep_alive", &kalive, sizeof(kalive)); + if (ret) + brcmf_err("keep-alive packet config failed, ret=%d\n", ret); + + return ret; +} + static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowl) { @@ -3947,6 +3965,9 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, } else { /* Configure WOWL parameters */ brcmf_configure_wowl(cfg, ifp, wowl); + + /* Prevent disassociation due to inactivity with keep-alive */ + brcmf_keepalive_start(ifp, 30); } exit: diff --git
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index ff2ef557f0ea..e69d1e56996f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -1052,4 +1052,23 @@ struct brcmf_gscan_config { struct brcmf_gscan_bucket_config bucket[1]; }; +/** + * struct brcmf_mkeep_alive_pkt_le - configuration data for keep-alive frame. + * + * @version: version for mkeep_alive + * @length: length of fixed parameters in the structure. + * @period_msec: keep-alive period in milliseconds. + * @len_bytes: size of the data. + * @keep_alive_id: ID (0 - 3). + * @data: keep-alive frame data. + */ +struct brcmf_mkeep_alive_pkt_le { + __le16 version; + __le16 length; + __le32 period_msec; + __le16 len_bytes; + u8 keep_alive_id; + u8 data[0]; +} __packed; + #endif /* FWIL_TYPES_H_ */ diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 1085afbefba8..337428ef2e67 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -92,6 +92,32 @@ config IWLWIFI_BCAST_FILTERING If unsure, don't enable this option, as some programs might expect incoming broadcasts for their normal operations. +config IWLMEI + tristate "Intel Management Engine communication over WLAN" + depends on INTEL_MEI + depends on PM + depends on IWLMVM + help + Enables the iwlmei kernel module. + + CSME stands for Converged Security and Management Engine. It is a CPU + on the chipset and runs a dedicated firmware. AMT (Active Management + Technology) is one of the applications that run on that CPU. AMT + allows the platform to be controlled remotely. + + This kernel module enables communication with the Intel Management + Engine over WiFi. This is supported starting from Tiger Lake + platforms and has been tested on 9260 devices only. + If AMT is configured not to use the wireless device, this module is + harmless (and useless). + Enabling this option on a platform that has a different device and + has wireless enabled on AMT can prevent WiFi from working correctly. + + For more information see + <https://software.intel.com/en-us/manageability/> + + If unsure, say N.
+ menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 0d4656efe908..75a703eb1bdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -30,5 +30,6 @@ ccflags-y += -I$(src) obj-$(CONFIG_IWLDVM) += dvm/ obj-$(CONFIG_IWLMVM) += mvm/ +obj-$(CONFIG_IWLMEI) += mei/ CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index c875bf35533c..009dd4be597b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-EINVAL); goto out; } @@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, IWL_DEBUG_FW(trans, "Couldn't allocate (more) reduce_power_data\n"); + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-ENOMEM); goto out; } @@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, done: if (!size) { IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n"); + /* Better safe than sorry, but 'reduce_power_data' should + * always be NULL if !size. + */ + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-ENOENT); goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 36196e07b1a0..5cec467b995b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; + int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; + + for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS - drv->dbgfs_op_mode = debugfs_create_dir(op->name, - drv->dbgfs_drv); - dbgfs_dir = drv->dbgfs_op_mode; + drv->dbgfs_op_mode = debugfs_create_dir(op->name, + drv->dbgfs_drv); + dbgfs_dir = drv->dbgfs_op_mode; #endif - op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); + op_mode = ops->start(drv->trans, drv->trans->cfg, + &drv->fw, dbgfs_dir); + + if (op_mode) + return op_mode; + + IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS - if (!op_mode) { debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; - } #endif + } - return op_mode; + return NULL; } static void _iwl_op_mode_stop(struct iwl_drv *drv) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 2e2d60a58692..0fd009e6d685 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv); #define IWL_EXPORT_SYMBOL(sym) #endif +/* max retry for init flow */ +#define IWL_MAX_INIT_RETRY 2 + #endif /* __iwl_drv_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index f470f9aea50f..0cd4718372f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -22,6 +22,7 @@ #include 
"fw/api/commands.h" #include "fw/api/cmdhdr.h" #include "fw/img.h" +#include "mei/iwl-mei.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -1115,6 +1116,66 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, } struct iwl_nvm_data * +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_mei_nvm *mei_nvm, + const struct iwl_fw *fw) +{ + struct iwl_nvm_data *data; + u32 sbands_flags = 0; + u8 rx_chains = fw->valid_rx_ant; + u8 tx_chains = fw->valid_rx_ant; + + if (cfg->uhb_supported) + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS_UHB), + GFP_KERNEL); + else + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS_EXT), + GFP_KERNEL); + if (!data) + return NULL; + + BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) != + IWL_NVM_NUM_CHANNELS_UHB); + data->nvm_version = mei_nvm->nvm_version; + + iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg); + if (data->valid_tx_ant) + tx_chains &= data->valid_tx_ant; + if (data->valid_rx_ant) + rx_chains &= data->valid_rx_ant; + + data->sku_cap_mimo_disabled = false; + data->sku_cap_band_24ghz_enable = true; + data->sku_cap_band_52ghz_enable = true; + data->sku_cap_11n_enable = + !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL); + data->sku_cap_11ac_enable = true; + data->sku_cap_11ax_enable = + mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT; + + data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT; + + data->n_hw_addrs = mei_nvm->n_hw_addrs; + /* If no valid mac address was found - bail out */ + if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) { + kfree(data); + return NULL; + } + + if (data->lar_enabled && + fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + + iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains, + sbands_flags, true, fw); + + return data; +} +IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data); + +struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index e1f5a9741850..e01f7751cf11 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2020 Intel Corporation + * Copyright (C) 2005-2015, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ @@ -8,6 +8,7 @@ #include <net/cfg80211.h> #include "iwl-eeprom-parse.h" +#include "mei/iwl-mei.h" /** * enum iwl_nvm_sbands_flags - modification flags for the channel profiles @@ -81,4 +82,12 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw); +/** + * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data + */ +struct iwl_nvm_data * +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_mei_nvm *mei_nvm, + const struct iwl_fw *fw); + #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 4ebb1871bd1f..a4060af50201 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ 
-924,6 +924,7 @@ struct iwl_trans_txqs { /** * struct iwl_trans - transport common data * + * @csme_own - true if we couldn't get ownership on the device * @ops - pointer to iwl_trans_ops * @op_mode - pointer to the op_mode * @trans_cfg: the trans-specific configuration part @@ -958,6 +959,7 @@ struct iwl_trans_txqs { * @iwl_trans_txqs: transport tx queues data. */ struct iwl_trans { + bool csme_own; const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg_trans_params *trans_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/mei/Makefile b/drivers/net/wireless/intel/iwlwifi/mei/Makefile new file mode 100644 index 000000000000..8e3ef0347db7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_IWLMEI) += iwlmei.o +iwlmei-y += main.o +iwlmei-y += net.o +iwlmei-$(CONFIG_IWLWIFI_DEVICE_TRACING) += trace.o +CFLAGS_trace.o := -I$(src) + +ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/internal.h b/drivers/net/wireless/intel/iwlwifi/mei/internal.h new file mode 100644 index 000000000000..92fea7dd71e2 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/internal.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#ifndef __IWLMEI_INTERNAL_H_ +#define __IWLMEI_INTERNAL_H_ + +#include <uapi/linux/if_ether.h> +#include <linux/netdevice.h> + +#include "sap.h" + +rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *skb, + const struct iwl_sap_oob_filters *filters, + bool *pass_to_csme); + +void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx); + +#endif /* __IWLMEI_INTERNAL_H_ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h new file mode 100644 index 000000000000..67122cfa2292 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#ifndef __iwl_mei_h__ +#define __iwl_mei_h__ + +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/ieee80211.h> + +/** + * DOC: Introduction + * + * iwlmei is the kernel module that is in charge of the communication between + * the iwlwifi driver and the CSME firmware's WLAN driver. This communication + * uses the SAP protocol defined in another file. + * iwlwifi can request or release ownership on the WiFi device through iwlmei. + * iwlmei may notify iwlwifi about certain events: what filters iwlwifi should + * use to pass inbound packets through to the CSME firmware, for example. + * iwlmei may also use iwlwifi to send traffic. This means that we need + * communication from iwlmei to iwlwifi and the other way around. + */ + +/** + * DOC: Life cycle + * + * iwlmei exports symbols that are needed by iwlwifi so that iwlmei will always + * be loaded when iwlwifi is alive. iwlwifi registers itself to iwlmei and + * provides the pointers to the functions that iwlmei calls whenever needed. + * iwlwifi calls iwlmei through direct and context-free function calls. + * It is assumed that only one device is accessible to the CSME firmware and + * under the scope of iwlmei so that it is valid not to have any context passed + * to iwlmei's functions. + * + * There are cases in which iwlmei can't access the CSME firmware, because the + * CSME firmware is undergoing a reset, or the mei bus decided to unbind the + * device.
In those cases, iwlmei must not send requests over the mei + * bus. Instead, it needs to cache the requests from iwlwifi and fulfill them + * when the mei bus is available again. + * + * iwlmei can call iwlwifi as long as iwlwifi is registered to iwlmei. When + * iwlwifi goes down (the PCI device is unbound, or iwlwifi is unloaded) + * iwlwifi needs to unregister from iwlmei. + */ + +/** + * DOC: Memory layout + * + * Since iwlwifi calls iwlmei without any context, iwlmei needs to hold a + * global pointer to its data (which is in the mei client device's private + * data area). If there was no bind on the mei bus, this pointer is NULL and + * iwlmei knows not to access the CSME firmware upon requests from iwlwifi. + * + * iwlmei needs to cache requests from iwlwifi when there is no mei client + * device available (when iwlmei has been removed from the mei bus). In this + * case, all iwlmei's data that resides in the mei client device's private data + * area is unavailable. For this specific case, a separate caching area is + * needed. + */ + +/** + * DOC: Concurrency + * + * iwlwifi can call iwlmei at any time. iwlmei will take care to synchronize + * the calls from iwlwifi with its internal flows. iwlwifi must not call iwlmei + * in flows that cannot sleep. Moreover, iwlwifi must not call iwlmei in flows + * that originated from iwlmei. + */ + +/** + * DOC: Probe and remove from mei bus driver + * + * When the mei bus driver enumerates its devices, it calls the iwlmei's probe + * function which will send the %SAP_ME_MSG_START message. The probe completes + * before the response (%SAP_ME_MSG_START_OK) is received. This response will + * be handled by the Rx path. Once it arrives, the connection to the CSME + * firmware is considered established and iwlwifi's requests can be served by + * the CSME firmware. + * + * When the mei bus driver removes the device, iwlmei loses all the data that + * was attached to the mei client device. It clears the global pointer to the + * mei client device since it is not available anymore. This will cause all the + * requests coming from iwlwifi to be cached. This flow takes the global mutex + * to be synchronized with all the requests coming from iwlwifi. + */ + +/** + * DOC: Driver load when CSME owns the device + * + * When the driver (iwlwifi) is loaded while CSME owns the device, + * it'll ask CSME to release the device through HW registers. CSME + * will release the device only in the case that there is no connection + * through the mei bus. If there is a mei bus connection, CSME will refuse + * to release the ownership on the device through the HW registers. In that + * case, iwlwifi must first request ownership using the SAP protocol. + * + * Once iwlwifi requests ownership through the SAP protocol, CSME grants + * ownership on the device through the HW registers as well. + * In order to request ownership over SAP, we first need to have an interface + * which means that we need to register to mac80211. + * This can't happen before we get the NVM that contains all the capabilities + * of the device. Reading the NVM usually requires loading the firmware, but + * this is impossible as long as we don't have ownership on the device. + * In order to solve this chicken and egg problem, the host driver can get + * the NVM through CSME which owns the device. It can send + * %SAP_MSG_NOTIF_GET_NVM, which will be answered with %SAP_MSG_NOTIF_NVM + * carrying the NVM content that the host driver needs.
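+ * + * A minimal sketch of that flow from the host driver's side (illustrative + * only; error handling is elided and the local variables trans, cfg, fw and + * nvm_data are assumed to come from the caller's context): + * + *	struct iwl_mei_nvm *mei_nvm = iwl_mei_get_nvm(); + * + *	if (mei_nvm) { + *		nvm_data = iwl_parse_mei_nvm_data(trans, cfg, mei_nvm, fw); + *		kfree(mei_nvm); + *	}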
+ */ + +/** + * DOC: CSME behavior regarding the ownership requests + * + * The ownership requests from the host can come in two different ways: + * - the HW registers in iwl_pcie_set_hw_ready + * - using the Software Arbitration Protocol (SAP) + * + * The host can ask CSME who owns the device with %SAP_MSG_NOTIF_WHO_OWNS_NIC, + * and it can request ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP. + * The host will first use %SAP_MSG_NOTIF_WHO_OWNS_NIC to know what state + * CSME is in. In case CSME thinks it owns the device, the host can ask for + * ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP. + * + * Here is the table that describes CSME's behavior upon an ownership request: + * + * +-------------------+------------+--------------+-----------------------------+------------+ + * | State | HW reg bit | Reply for | Event | HW reg bit | + * | | before | WHO_OWNS_NIC | | after | + * +===================+============+==============+=============================+============+ + * | WiAMT not | 0 | Host | HW register or | 0 | + * | operational | Host owner | | HOST_ASKS_FOR_NIC_OWNERSHIP | Host owner | + * +-------------------+------------+--------------+-----------------------------+------------+ + * | Operational & | 1 | N/A | HW register | 0 | + * | SAP down & | CSME owner | | | Host owner | + * | no session active | | | | | + * +-------------------+------------+--------------+-----------------------------+------------+ + * | Operational & | 1 | CSME | HW register | 1 | + * | SAP up | CSME owner | | | CSME owner | + * +-------------------+------------+--------------+-----------------------------+------------+ + * | Operational & | 1 | CSME | HOST_ASKS_FOR_NIC_OWNERSHIP | 0 | + * | SAP up | CSME owner | | | Host owner | + * +-------------------+------------+--------------+-----------------------------+------------+ + */ + +/** + * DOC: Driver load when CSME is associated and a session is active + * + * A "session" is active when CSME is associated to an access point and the + * link is used to attach a remote driver or to control the system remotely. + * When a session is active, we want to make sure it won't disconnect when we + * take ownership on the device. + * In this case, the driver can get the device, but it'll need to make + * sure that it connects to the exact same AP (same BSSID). + * In order to do so, CSME will send the connection parameters through + * SAP and then the host can check if it can connect to this same AP. + * If yes, it can request ownership through SAP and connect quickly without + * scanning all the channels, but just probing the AP on the channel that + * CSME was connected to. + * In order to signal this specific scenario to iwlwifi, iwlmei will + * immediately require iwlwifi to report RF-Kill to the network stack. This + * RF-Kill will prevent the stack from getting the device, and it has a reason + * that tells the userspace that the device is in RF-Kill because it is not + * owned by the host. Once the userspace has configured the right profile, + * it'll be able to let iwlmei know that it can request ownership over SAP + * which will remove the RF-Kill, and finally allow the host to connect. + * The host then has 3 seconds to connect (including DHCP). If the host + * fails to connect within those 3 seconds, CSME takes the device back. + */ + +/** + * DOC: Datapath + * + * CSME can transmit packets through the netdev that it gets from the wifi + * driver. It'll send packets in the 802.3 format and simply call + * dev_queue_xmit.
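+ * + * In sketch form (a condensed view of iwl_mei_handle_sap_data() implemented + * in main.c in this patch; details elided, and QOS_HDR_IV_SNAP_LEN is the + * headroom reserved for the later conversion to 802.11), the CSME Tx + * direction is: + * + *	skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN); + *	... copy the 802.3 frame from the SAP data queue into the skb ... + *	dev_queue_xmit(skb);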
+ * + * For Rx, iwlmei registers an Rx handler that it attaches to the netdev. + * iwlmei may catch packets and send them to CSME; it can then either drop + * them so that they are invisible to user space, or let them go up to user + * space. + * + * Packets transmitted by the user space do not need to be forwarded to CSME + * with the exception of the DHCP request. In order to know what IP is used + * by the user space, CSME needs to get the DHCP request. See + * iwl_mei_tx_copy_to_csme(). + */ + +/** + * enum iwl_mei_nvm_caps - capabilities for MEI NVM + * @MEI_NVM_CAPS_LARI_SUPPORT: Lari is supported + * @MEI_NVM_CAPS_11AX_SUPPORT: 11AX is supported + */ +enum iwl_mei_nvm_caps { + MEI_NVM_CAPS_LARI_SUPPORT = BIT(0), + MEI_NVM_CAPS_11AX_SUPPORT = BIT(1), +}; + +/** + * struct iwl_mei_nvm - used to pass the NVM from CSME + * @hw_addr: The MAC address + * @n_hw_addrs: The number of MAC addresses + * @reserved: For alignment. + * @radio_cfg: The radio configuration. + * @caps: See &enum iwl_mei_nvm_caps. + * @nvm_version: The version of the NVM. + * @channels: The data for each channel. + * + * If a field is added, it must correspond to the SAP structure. + */ +struct iwl_mei_nvm { + u8 hw_addr[ETH_ALEN]; + u8 n_hw_addrs; + u8 reserved; + u32 radio_cfg; + u32 caps; + u32 nvm_version; + u32 channels[110]; +}; + +/** + * enum iwl_mei_pairwise_cipher - cipher for UCAST key + * @IWL_MEI_CIPHER_NONE: none + * @IWL_MEI_CIPHER_CCMP: ccmp + * @IWL_MEI_CIPHER_GCMP: gcmp + * @IWL_MEI_CIPHER_GCMP_256: gcmp 256 + * + * Note that those values are dictated by the CSME firmware API (see sap.h) + */ +enum iwl_mei_pairwise_cipher { + IWL_MEI_CIPHER_NONE = 0, + IWL_MEI_CIPHER_CCMP = 4, + IWL_MEI_CIPHER_GCMP = 8, + IWL_MEI_CIPHER_GCMP_256 = 9, +}; + +/** + * enum iwl_mei_akm_auth - a combination of AKM and AUTH method + * @IWL_MEI_AKM_AUTH_OPEN: No encryption + * @IWL_MEI_AKM_AUTH_RSNA: 1X profile + * @IWL_MEI_AKM_AUTH_RSNA_PSK: PSK profile + * @IWL_MEI_AKM_AUTH_SAE: SAE profile + * + * Note that those values are dictated by the CSME firmware API (see sap.h) + */ +enum iwl_mei_akm_auth { + IWL_MEI_AKM_AUTH_OPEN = 0, + IWL_MEI_AKM_AUTH_RSNA = 6, + IWL_MEI_AKM_AUTH_RSNA_PSK = 7, + IWL_MEI_AKM_AUTH_SAE = 9, +}; + +/** + * struct iwl_mei_conn_info - connection info + * @lp_state: link protection state + * @auth_mode: authentication mode + * @ssid_len: the length of SSID + * @ssid: the SSID + * @pairwise_cipher: the cipher used for unicast packets + * @channel: the associated channel + * @band: the associated band + * @bssid: the BSSID + */ +struct iwl_mei_conn_info { + u8 lp_state; + u8 auth_mode; + u8 ssid_len; + u8 channel; + u8 band; + u8 pairwise_cipher; + u8 bssid[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +}; + +/** + * struct iwl_mei_colloc_info - collocated AP info + * @channel: the channel of the collocated AP + * @bssid: the BSSID of the collocated AP + */ +struct iwl_mei_colloc_info { + u8 channel; + u8 bssid[ETH_ALEN]; +}; + +/** + * struct iwl_mei_ops - driver's operations called by iwlmei + * Operations will not be called more than once concurrently. + * It's not allowed to call iwlmei functions from this context. + * + * @me_conn_status: provide information about CSME's current connection. + * @rfkill: called when the wifi driver should report a change in the rfkill + * status. + * @roaming_forbidden: indicates whether roaming is forbidden. + * @sap_connected: indicates that SAP is now connected.
Will be called in case + * the wifi driver registered to iwlmei before the SAP connection succeeded or + * when the SAP connection is re-established. + * @nic_stolen: this means that the device is no longer available. The device + * can still be used until the callback returns. + */ +struct iwl_mei_ops { + void (*me_conn_status)(void *priv, + const struct iwl_mei_conn_info *conn_info); + void (*rfkill)(void *priv, bool blocked); + void (*roaming_forbidden)(void *priv, bool forbidden); + void (*sap_connected)(void *priv); + void (*nic_stolen)(void *priv); +}; + +#if IS_ENABLED(CONFIG_IWLMEI) + +/** + * iwl_mei_is_connected() - is the connection to the CSME firmware established? + * + * Return: true if we have a SAP connection + */ +bool iwl_mei_is_connected(void); + +/** + * iwl_mei_get_nvm() - returns the NVM for the device + * + * It is the caller's responsibility to free the memory returned + * by this function. + * This function blocks (sleeps) until the NVM is ready. + * + * Return: the NVM as received from CSME + */ +struct iwl_mei_nvm *iwl_mei_get_nvm(void); + +/** + * iwl_mei_get_ownership() - request ownership + * + * This function blocks until ownership is granted or the timeout expires. + * + * Return: 0 in case we could get ownership on the device + */ +int iwl_mei_get_ownership(void); + +/** + * iwl_mei_set_rfkill_state() - set SW and HW RF kill states + * @hw_rfkill: HW RF kill state. + * @sw_rfkill: SW RF kill state. + * + * This function must be called when SW RF kill is issued by the user. + */ +void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill); + +/** + * iwl_mei_set_nic_info() - set mac address + * @mac_address: mac address to set + * @nvm_address: NVM mac address to set + * + * This function must be called upon mac address change. + */ +void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address); + +/** + * iwl_mei_set_country_code() - set new country code + * @mcc: the new applied MCC + * + * This function must be called upon country code update + */ +void iwl_mei_set_country_code(u16 mcc); + +/** + * iwl_mei_set_power_limit() - set TX power limit + * @power_limit: pointer to an array of 10 elements (le16) representing the + * power restrictions per chain. + * + * This function must be called upon power restrictions change + */ +void iwl_mei_set_power_limit(const __le16 *power_limit); + +/** + * iwl_mei_register() - register the wifi driver to iwlmei + * @priv: a pointer to the wifi driver's context. Cannot be NULL. + * @ops: the ops structure. + * + * Return: 0 unless something went wrong. It is illegal to call any + * other API function before this function is called and succeeds. + * + * Only one wifi driver instance (wifi device instance really) + * can register at a time. + */ +int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops); + +/** + * iwl_mei_start_unregister() - unregister the wifi driver from iwlmei + * + * From this point on, iwlmei will not use the callbacks provided by + * the driver, but the device is still usable. + */ +void iwl_mei_start_unregister(void); + +/** + * iwl_mei_unregister_complete() - complete the unregistration + * + * Must be called after iwl_mei_start_unregister. When this function returns, + * the device is owned by CSME. + */ +void iwl_mei_unregister_complete(void); + +/** + * iwl_mei_set_netdev() - sets the netdev for Tx / Rx. + * @netdev: the net_device + * + * The caller should set the netdev to a non-NULL value when the + * interface is added. Packets might be sent to the driver immediately + * afterwards.
+ * The caller should set the netdev to NULL when the interface is removed. + * This function will call synchronize_net() after setting the netdev to NULL. + * Only when this function returns, can the caller assume that iwlmei will + * no longer inject packets into the netdev's Tx path. + * + * Context: This function can sleep and assumes rtnl_lock is taken. + * The netdev must be set to NULL before iwl_mei_start_unregister() is called. + */ +void iwl_mei_set_netdev(struct net_device *netdev); + +/** + * iwl_mei_tx_copy_to_csme() - must be called for each packet sent by + * the wifi driver. + * @skb: the skb sent + * @ivlen: the size of the IV that needs to be skipped after the MAC and + * before the SNAP header. + * + * This function doesn't take any lock, it simply tries to catch DHCP + * packets sent by the wifi driver. If the packet is a DHCP packet, it + * will send it to CSME. This function must not be called for virtual + * interfaces that are not monitored by CSME, meaning it must be called + * only for packets transmitted by the netdevice that was registered + * with iwl_mei_set_netdev(). + */ +void iwl_mei_tx_copy_to_csme(struct sk_buff *skb, unsigned int ivlen); + +/** + * iwl_mei_host_associated() - must be called when iwlwifi is associated. + * @conn_info: pointer to the connection info structure. + * @colloc_info: pointer to the collocated AP info. This is relevant only in + * case of UHB associated AP, otherwise set to NULL. + */ +void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, + const struct iwl_mei_colloc_info *colloc_info); + +/** + * iwl_mei_host_disassociated() - must be called when iwlwifi is disassociated. + */ +void iwl_mei_host_disassociated(void); + +/** + * iwl_mei_device_down() - must be called when the device is down + */ +void iwl_mei_device_down(void); + +#else + +static inline bool iwl_mei_is_connected(void) +{ return false; } + +static inline struct iwl_mei_nvm *iwl_mei_get_nvm(void) +{ return NULL; } + +static inline int iwl_mei_get_ownership(void) +{ return 0; } + +static inline void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) +{} + +static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address) +{} + +static inline void iwl_mei_set_country_code(u16 mcc) +{} + +static inline void iwl_mei_set_power_limit(const __le16 *power_limit) +{} + +static inline int iwl_mei_register(void *priv, + const struct iwl_mei_ops *ops) +{ return 0; } + +static inline void iwl_mei_start_unregister(void) +{} + +static inline void iwl_mei_unregister_complete(void) +{} + +static inline void iwl_mei_set_netdev(struct net_device *netdev) +{} + +static inline void iwl_mei_tx_copy_to_csme(struct sk_buff *skb, + unsigned int ivlen) +{} + +static inline void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, + const struct iwl_mei_colloc_info *colloc_info) +{} + +static inline void iwl_mei_host_disassociated(void) +{} + +static inline void iwl_mei_device_down(void) +{} + +#endif /* CONFIG_IWLMEI */ + +#endif /* __iwl_mei_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c new file mode 100644 index 000000000000..112cc362e8e7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -0,0 +1,1982 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/ieee80211.h> +#include <linux/rtnetlink.h> +#include <linux/module.h> +#include
<linux/moduleparam.h> +#include <linux/mei_cl_bus.h> +#include <linux/rcupdate.h> +#include <linux/debugfs.h> +#include <linux/skbuff.h> +#include <linux/wait.h> +#include <linux/slab.h> +#include <linux/mm.h> + +#include <net/cfg80211.h> + +#include "internal.h" +#include "iwl-mei.h" +#include "trace.h" +#include "trace-data.h" +#include "sap.h" + +MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface"); +MODULE_LICENSE("GPL"); + +#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \ + 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65) + +/* + * Since iwlwifi calls iwlmei without any context, hold a pointer to the + * mei_cl_device structure here. + * Define a mutex that will synchronize all the flows between iwlwifi and + * iwlmei. + * Note that iwlmei can't have several instances, so it is ok to have static + * variables here. + */ +static struct mei_cl_device *iwl_mei_global_cldev; +static DEFINE_MUTEX(iwl_mei_mutex); +static unsigned long iwl_mei_status; + +enum iwl_mei_status_bits { + IWL_MEI_STATUS_SAP_CONNECTED, +}; + +bool iwl_mei_is_connected(void) +{ + return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); +} +EXPORT_SYMBOL_GPL(iwl_mei_is_connected); + +#define SAP_VERSION 3 +#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */ + +struct iwl_sap_q_ctrl_blk { + __le32 wr_ptr; + __le32 rd_ptr; + __le32 size; +}; + +enum iwl_sap_q_idx { + SAP_QUEUE_IDX_NOTIF = 0, + SAP_QUEUE_IDX_DATA, + SAP_QUEUE_IDX_MAX, +}; + +struct iwl_sap_dir { + __le32 reserved; + struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX]; +}; + +enum iwl_sap_dir_idx { + SAP_DIRECTION_HOST_TO_ME = 0, + SAP_DIRECTION_ME_TO_HOST, + SAP_DIRECTION_MAX, +}; + +struct iwl_sap_shared_mem_ctrl_blk { + __le32 sap_id; + __le32 size; + struct iwl_sap_dir dir[SAP_DIRECTION_MAX]; +}; + +/* + * The shared area has the following layout: + * + * +-----------------------------------+ + * |struct iwl_sap_shared_mem_ctrl_blk | + * +-----------------------------------+ + * |Host -> ME data queue | + * +-----------------------------------+ + * |Host -> ME notif queue | + * +-----------------------------------+ + * |ME -> Host data queue | + * +-----------------------------------+ + * |ME -> host notif queue | + * +-----------------------------------+ + * |SAP control block id (SAP!) | + * +-----------------------------------+ + */ + +#define SAP_H2M_DATA_Q_SZ 48256 +#define SAP_M2H_DATA_Q_SZ 24128 +#define SAP_H2M_NOTIF_Q_SZ 2240 +#define SAP_M2H_NOTIF_Q_SZ 62720 + +#define _IWL_MEI_SAP_SHARED_MEM_SZ \ + (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ + SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \ + SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) + +#define IWL_MEI_SAP_SHARED_MEM_SZ \ + (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE)) + +struct iwl_mei_shared_mem_ptrs { + struct iwl_sap_shared_mem_ctrl_blk *ctrl; + void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX]; +}; + +struct iwl_mei_filters { + struct rcu_head rcu_head; + struct iwl_sap_oob_filters filters; +}; + +/** + * struct iwl_mei - holds the private data for iwl_mei + * + * @get_nvm_wq: the wait queue for the get_nvm flow + * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA + * message. Used so that we can send CHECK_SHARED_AREA from atomic + * contexts.
+ * @get_ownership_wq: the wait queue for the get_ownership flow + * @shared_mem: the memory that is shared between CSME and the host + * @cldev: the pointer to the MEI client device + * @nvm: the data returned by the CSME for the NVM + * @filters: the filters sent by CSME + * @got_ownership: true if we own the device + * @amt_enabled: true if CSME has wireless enabled + * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI + * bus, but rather need to wait until send_csa_msg_wk runs + * @csme_taking_ownership: true when CSME is taking ownership. Used to remember + * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down + * flow. + * @csa_throttle_end_wk: used when &csa_throttled is true + * @data_q_lock: protects the access to the data queues which are + * accessed without the mutex. + * @sap_seq_no: the sequence number for the SAP messages + * @seq_no: the sequence number for the ME messages + * @dbgfs_dir: the debugfs dir entry + */ +struct iwl_mei { + wait_queue_head_t get_nvm_wq; + struct work_struct send_csa_msg_wk; + wait_queue_head_t get_ownership_wq; + struct iwl_mei_shared_mem_ptrs shared_mem; + struct mei_cl_device *cldev; + struct iwl_mei_nvm *nvm; + struct iwl_mei_filters __rcu *filters; + bool got_ownership; + bool amt_enabled; + bool csa_throttled; + bool csme_taking_ownership; + struct delayed_work csa_throttle_end_wk; + spinlock_t data_q_lock; + + atomic_t sap_seq_no; + atomic_t seq_no; + + struct dentry *dbgfs_dir; +}; + +/** + * iwl_mei_cache - cache for the parameters from iwlwifi + * @ops: Callbacks to iwlwifi. + * @netdev: The netdev that will be used to transmit / receive packets. + * @conn_info: The connection info message triggered by iwlwifi's association. + * @power_limit: pointer to an array of 10 elements (le16) representing the + * power restrictions per chain. + * @rf_kill: rf kill state. + * @mcc: MCC info + * @mac_address: interface MAC address. + * @nvm_address: NVM MAC address. + * @priv: A pointer to iwlwifi. + * + * This is used to cache the configuration coming from iwlwifi. The data + * is cached here so that we can buffer the configuration even if we don't have + * a binding on the mei bus and hence no iwl_mei structure.
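+ * + * For example (a sketch of the replay performed by iwl_mei_set_init_conf() + * further down in this file), a cached country code is simply re-sent once + * SAP is available again: + * + *	mcc_msg.mcc = cpu_to_le16(iwl_mei_cache.mcc); + *	iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);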
+ */ +static struct { + const struct iwl_mei_ops *ops; + struct net_device __rcu *netdev; + const struct iwl_sap_notif_connection_info *conn_info; + const __le16 *power_limit; + u32 rf_kill; + u16 mcc; + u8 mac_address[6]; + u8 nvm_address[6]; + void *priv; +} iwl_mei_cache = { + .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED +}; + +static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + if (mei_cldev_dma_unmap(cldev)) + dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n"); + memset(&mei->shared_mem, 0, sizeof(mei->shared_mem)); +} + +#define HBM_DMA_BUF_ID_WLAN 1 + +static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; + + mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, + IWL_MEI_SAP_SHARED_MEM_SZ); + + if (IS_ERR(mem->ctrl)) { + int ret = PTR_ERR(mem->ctrl); + + dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", + ret); + mem->ctrl = NULL; + + return ret; + } + + memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ); + + return 0; +} + +static void iwl_mei_init_shared_mem(struct iwl_mei *mei) +{ + struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; + struct iwl_sap_dir *h2m; + struct iwl_sap_dir *m2h; + int dir, queue; + u8 *q_head; + + mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID); + + mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl)); + + h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; + + h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = + cpu_to_le32(SAP_H2M_DATA_Q_SZ); + h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = + cpu_to_le32(SAP_H2M_NOTIF_Q_SZ); + m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = + cpu_to_le32(SAP_M2H_DATA_Q_SZ); + m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = + cpu_to_le32(SAP_M2H_NOTIF_Q_SZ); + + /* q_head points to the start of the first queue */ + q_head = (void *)(mem->ctrl + 1); + + /* Initialize the queue heads */ + for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) { + for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) { + mem->q_head[dir][queue] = q_head; + q_head += + le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); + } + } + + *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID); +} + +static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev, + struct iwl_sap_q_ctrl_blk *notif_q, + u8 *q_head, + const struct iwl_sap_hdr *hdr) +{ + u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); + u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); + u32 q_sz = le32_to_cpu(notif_q->size); + size_t room_in_buf; + size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len); + + if (rd > q_sz || wr > q_sz) { + dev_err(&cldev->dev, + "Pointers are past the end of the buffer\n"); + return -EINVAL; + } + + room_in_buf = wr >= rd ?
q_sz - wr + rd : rd - wr; + + /* we don't have enough room for the data to write */ + if (room_in_buf < tx_sz) { + dev_err(&cldev->dev, + "Not enough room in the buffer\n"); + return -ENOSPC; + } + + if (wr + tx_sz <= q_sz) { + memcpy(q_head + wr, hdr, tx_sz); + } else { + memcpy(q_head + wr, hdr, q_sz - wr); + memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr)); + } + + WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); + return 0; +} + +static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei) +{ + struct iwl_sap_q_ctrl_blk *notif_q; + struct iwl_sap_dir *dir; + + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; + + if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr)) + return true; + + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; + return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr); +} + +static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_me_msg_start msg = { + .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA), + .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), + }; + int ret; + + lockdep_assert_held(&iwl_mei_mutex); + + if (mei->csa_throttled) + return 0; + + trace_iwlmei_me_msg(&msg.hdr, true); + ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg)); + if (ret != sizeof(msg)) { + dev_err(&cldev->dev, + "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n", + ret); + return ret; + } + + mei->csa_throttled = true; + + schedule_delayed_work(&mei->csa_throttle_end_wk, + msecs_to_jiffies(100)); + + return 0; +} + +static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk) +{ + struct iwl_mei *mei = + container_of(wk, struct iwl_mei, csa_throttle_end_wk.work); + + mutex_lock(&iwl_mei_mutex); + + mei->csa_throttled = false; + + if (iwl_mei_host_to_me_data_pending(mei)) + iwl_mei_send_check_shared_area(mei->cldev); + + mutex_unlock(&iwl_mei_mutex); +} + +static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev, + struct iwl_sap_hdr *hdr) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_q_ctrl_blk *notif_q; + struct iwl_sap_dir *dir; + void *q_head; + int ret; + + lockdep_assert_held(&iwl_mei_mutex); + + if (!mei->shared_mem.ctrl) { + dev_err(&cldev->dev, + "No shared memory, can't send any SAP message\n"); + return -EINVAL; + } + + if (!iwl_mei_is_connected()) { + dev_err(&cldev->dev, + "Can't send a SAP message if we're not connected\n"); + return -ENODEV; + } + + hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); + dev_dbg(&cldev->dev, "Sending %d\n", hdr->type); + + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF]; + ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr); + + if (ret < 0) + return ret; + + trace_iwlmei_sap_cmd(hdr, true); + + return iwl_mei_send_check_shared_area(cldev); +} + +void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx) +{ + struct iwl_sap_q_ctrl_blk *notif_q; + struct iwl_sap_dir *dir; + struct iwl_mei *mei; + size_t room_in_buf; + size_t tx_sz; + size_t hdr_sz; + u32 q_sz; + u32 rd; + u32 wr; + void *q_head; + + if (!iwl_mei_global_cldev) + return; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + /* + * We access this path for Rx packets (the more common case) + * and from the Tx path when we send DHCP packets,
the latter is + * very unlikely. + * Take the lock already here to make sure we see that remove() + * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit. + */ + spin_lock_bh(&mei->data_q_lock); + + if (!iwl_mei_is_connected()) { + spin_unlock_bh(&mei->data_q_lock); + return; + } + + /* + * We are in an RCU critical section and the remove from the CSME bus + * which would free this memory waits for the readers to complete (this + * is done in netdev_rx_handler_unregister). + */ + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; + + rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); + wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); + q_sz = le32_to_cpu(notif_q->size); + hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) : + sizeof(struct iwl_sap_hdr); + tx_sz = skb->len + hdr_sz; + + if (rd > q_sz || wr > q_sz) { + dev_err(&mei->cldev->dev, + "can't write the data: pointers are past the end of the buffer\n"); + goto out; + } + + room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr; + + /* we don't have enough room for the data to write */ + if (room_in_buf < tx_sz) { + dev_err(&mei->cldev->dev, + "Not enough room in the buffer for this data\n"); + goto out; + } + + if (skb_headroom(skb) < hdr_sz) { + dev_err(&mei->cldev->dev, + "Not enough headroom in the skb to write the SAP header\n"); + goto out; + } + + if (cb_tx) { + struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr)); + + cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET); + cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr)); + cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); + cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX)); + cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr)); + trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP); + } else { + struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr)); + + hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET); + hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); + hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); + trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR); + } + + if (wr + tx_sz <= q_sz) { + skb_copy_bits(skb, 0, q_head + wr, tx_sz); + } else { + skb_copy_bits(skb, 0, q_head + wr, q_sz - wr); + skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr)); + } + + WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); + +out: + spin_unlock_bh(&mei->data_q_lock); +} + +static int +iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type) +{ + struct iwl_sap_hdr msg = { + .type = cpu_to_le16(type), + }; + + return iwl_mei_send_sap_msg_payload(cldev, &msg); +} + +static void iwl_mei_send_csa_msg_wk(struct work_struct *wk) +{ + struct iwl_mei *mei = + container_of(wk, struct iwl_mei, send_csa_msg_wk); + + if (!iwl_mei_is_connected()) + return; + + mutex_lock(&iwl_mei_mutex); + + iwl_mei_send_check_shared_area(mei->cldev); + + mutex_unlock(&iwl_mei_mutex); +} + +/* Called in a RCU read critical section from netif_receive_skb */ +static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct iwl_mei *mei = + rcu_dereference(skb->dev->rx_handler_data); + struct iwl_mei_filters *filters = rcu_dereference(mei->filters); + bool rx_for_csme = false; + rx_handler_result_t res; + + /* + * remove() unregisters this handler and calls synchronize_net(), so + * this should never happen.
+ */ + if (!iwl_mei_is_connected()) { + dev_err(&mei->cldev->dev, + "Got an Rx packet, but we're not connected to SAP?\n"); + return RX_HANDLER_PASS; + } + + if (filters) + res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme); + else + res = RX_HANDLER_PASS; + + /* + * The data is already on the ring of the shared area, all we + * need to do is to tell the CSME firmware to check what we have + * there. + */ + if (rx_for_csme) + schedule_work(&mei->send_csa_msg_wk); + + if (res != RX_HANDLER_PASS) { + trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR); + dev_kfree_skb(skb); + } + + return res; +} + +static void +iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev, + const struct iwl_sap_me_msg_start_ok *rsp, + ssize_t len) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + if (len != sizeof(*rsp)) { + dev_err(&cldev->dev, + "got invalid SAP_ME_MSG_START_OK from CSME firmware\n"); + dev_err(&cldev->dev, + "size is incorrect: %zd instead of %zu\n", + len, sizeof(*rsp)); + return; + } + + if (rsp->supported_version != SAP_VERSION) { + dev_err(&cldev->dev, + "didn't get the expected version: got %d\n", + rsp->supported_version); + return; + } + + mutex_lock(&iwl_mei_mutex); + set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); + /* wifi driver has registered already */ + if (iwl_mei_cache.ops) { + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_WIFIDR_UP); + iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv); + } + + mutex_unlock(&iwl_mei_mutex); +} + +static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev, + const struct iwl_sap_csme_filters *filters) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + struct iwl_mei_filters *new_filters; + struct iwl_mei_filters *old_filters; + + old_filters = + rcu_dereference_protected(mei->filters, + lockdep_is_held(&iwl_mei_mutex)); + + new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL); + if (!new_filters) + return; + + /* Copy the OOB filters */ + new_filters->filters = filters->filters; + + rcu_assign_pointer(mei->filters, new_filters); + + if (old_filters) + kfree_rcu(old_filters, rcu_head); +} + +static void +iwl_mei_handle_conn_status(struct mei_cl_device *cldev, + const struct iwl_sap_notif_conn_status *status) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_mei_conn_info conn_info = { + .lp_state = le32_to_cpu(status->link_prot_state), + .ssid_len = le32_to_cpu(status->conn_info.ssid_len), + .channel = status->conn_info.channel, + .band = status->conn_info.band, + .auth_mode = le32_to_cpu(status->conn_info.auth_mode), + .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher), + }; + + if (!iwl_mei_cache.ops || + conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid)) + return; + + memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len); + ether_addr_copy(conn_info.bssid, status->conn_info.bssid); + + iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info); + + /* + * Update the Rfkill state in case the host does not own the device: + * if we are in Link Protection, ask not to touch the device; else, + * unblock rfkill. + * If the host owns the device, inform the user space whether it can + * roam.
+ */ + if (mei->got_ownership) + iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv, + status->link_prot_state); + else + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, + status->link_prot_state); +} + +static void iwl_mei_set_init_conf(struct iwl_mei *mei) +{ + struct iwl_sap_notif_host_link_up link_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), + .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)), + }; + struct iwl_sap_notif_country_code mcc_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), + .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)), + .mcc = cpu_to_le16(iwl_mei_cache.mcc), + }; + struct iwl_sap_notif_sar_limits sar_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), + .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)), + }; + struct iwl_sap_notif_host_nic_info nic_info_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), + .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)), + }; + struct iwl_sap_msg_dw rfkill_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), + .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)), + .val = cpu_to_le32(iwl_mei_cache.rf_kill), + }; + + iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC); + + if (iwl_mei_cache.conn_info) { + link_msg.conn_info = *iwl_mei_cache.conn_info; + iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr); + } + + iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr); + + if (iwl_mei_cache.power_limit) { + memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit, + sizeof(sar_msg.sar_chain_info_table)); + iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr); + } + + ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address); + ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address); + iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr); + + iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr); +} + +static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev, + const struct iwl_sap_msg_dw *dw) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct net_device *netdev; + + /* + * First take rtnl and only then the mutex to avoid an ABBA deadlock + * with iwl_mei_set_netdev() + */ + rtnl_lock(); + mutex_lock(&iwl_mei_mutex); + + netdev = rcu_dereference_protected(iwl_mei_cache.netdev, + lockdep_is_held(&iwl_mei_mutex)); + + if (mei->amt_enabled == !!le32_to_cpu(dw->val)) + goto out; + + mei->amt_enabled = !!le32_to_cpu(dw->val); + + if (mei->amt_enabled) { + if (netdev) + netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei); + + iwl_mei_set_init_conf(mei); + } else { + if (iwl_mei_cache.ops) + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); + if (netdev) + netdev_rx_handler_unregister(netdev); + } + +out: + mutex_unlock(&iwl_mei_mutex); + rtnl_unlock(); +} + +static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev, + const struct iwl_sap_msg_dw *dw) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME); +} + +static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev, + const void *payload) +{ + /* We can get ownership and driver is registered, go ahead */ + if (iwl_mei_cache.ops) + iwl_mei_send_sap_msg(cldev, + SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP); +} + +static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev, + const void *payload) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + dev_info(&cldev->dev, "CSME
takes ownership\n"); + + mei->got_ownership = false; + + /* + * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver + * is finished taking the device down. + */ + mei->csme_taking_ownership = true; + + if (iwl_mei_cache.ops) + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true); +} + +static void iwl_mei_handle_nvm(struct mei_cl_device *cldev, + const struct iwl_sap_nvm *sap_nvm) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm; + int i; + + kfree(mei->nvm); + mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL); + if (!mei->nvm) + return; + + ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr); + mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs; + mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg); + mei->nvm->caps = le32_to_cpu(sap_nvm->caps); + mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version); + + for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++) + mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]); + + wake_up_all(&mei->get_nvm_wq); +} + +static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev, + const struct iwl_sap_msg_dw *dw) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + /* + * This means that we can't use the wifi device right now, CSME is not + * ready to let us use it. + */ + if (!dw->val) { + dev_info(&cldev->dev, "Ownership req denied\n"); + return; + } + + mei->got_ownership = true; + wake_up_all(&mei->get_ownership_wq); + + iwl_mei_send_sap_msg(cldev, + SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED); + + /* We can now start the connection, unblock rfkill */ + if (iwl_mei_cache.ops) + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); +} + +static void iwl_mei_handle_ping(struct mei_cl_device *cldev, + const struct iwl_sap_hdr *hdr) +{ + iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG); +} + +static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev, + const struct iwl_sap_hdr *hdr) +{ + u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr); + u16 type = le16_to_cpu(hdr->type); + + dev_dbg(&cldev->dev, + "Got a new SAP message: type %d, len %d, seq %d\n", + le16_to_cpu(hdr->type), len, + le32_to_cpu(hdr->seq_num)); + +#define SAP_MSG_HANDLER(_cmd, _handler, _sz) \ + case SAP_MSG_NOTIF_ ## _cmd: \ + if (len < _sz) { \ + dev_err(&cldev->dev, \ + "Bad size for %d: %u < %u\n", \ + le16_to_cpu(hdr->type), \ + (unsigned int)len, \ + (unsigned int)_sz); \ + break; \ + } \ + mutex_lock(&iwl_mei_mutex); \ + _handler(cldev, (const void *)hdr); \ + mutex_unlock(&iwl_mei_mutex); \ + break + +#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \ + case SAP_MSG_NOTIF_ ## _cmd: \ + if (len < _sz) { \ + dev_err(&cldev->dev, \ + "Bad size for %d: %u < %u\n", \ + le16_to_cpu(hdr->type), \ + (unsigned int)len, \ + (unsigned int)_sz); \ + break; \ + } \ + _handler(cldev, (const void *)hdr); \ + break + +#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \ + case SAP_MSG_NOTIF_ ## _cmd: \ + if (len < _sz) { \ + dev_err(&cldev->dev, \ + "Bad size for %d: %u < %u\n", \ + le16_to_cpu(hdr->type), \ + (unsigned int)len, \ + (unsigned int)_sz); \ + break; \ + } \ + break + + switch (type) { + SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0); + SAP_MSG_HANDLER(CSME_FILTERS, + iwl_mei_handle_csme_filters, + sizeof(struct iwl_sap_csme_filters)); + SAP_MSG_HANDLER(CSME_CONN_STATUS, + iwl_mei_handle_conn_status, + sizeof(struct iwl_sap_notif_conn_status)); + SAP_MSG_HANDLER_NO_LOCK(AMT_STATE, + iwl_mei_handle_amt_state, + sizeof(struct iwl_sap_msg_dw)); + SAP_MSG_HANDLER_NO_HANDLER(PONG, 0); + 
SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm, + sizeof(struct iwl_sap_nvm)); + SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ, + iwl_mei_handle_rx_host_own_req, + sizeof(struct iwl_sap_msg_dw)); + SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner, + sizeof(struct iwl_sap_msg_dw)); + SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP, + iwl_mei_handle_can_release_ownership, 0); + SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP, + iwl_mei_handle_csme_taking_ownership, 0); + default: + /* + * This is not really an error, there are messages that we + * decided to ignore; still, it is useful to be able to leave + * a note if debug is enabled. + */ + dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n", + le16_to_cpu(hdr->type), len); + } + +#undef SAP_MSG_HANDLER +#undef SAP_MSG_HANDLER_NO_LOCK +} + +static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz, + u32 *_rd, u32 wr, + void *_buf, u32 len) +{ + u8 *buf = _buf; + u32 rd = *_rd; + + if (rd + len <= q_sz) { + memcpy(buf, q_head + rd, len); + rd += len; + } else { + memcpy(buf, q_head + rd, q_sz - rd); + memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd)); + rd = len - (q_sz - rd); + } + + *_rd = rd; +} + +#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \ + IEEE80211_TKIP_IV_LEN + \ + sizeof(rfc1042_header) + ETH_TLEN) + +static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev, + const u8 *q_head, u32 q_sz, + u32 rd, u32 wr, ssize_t valid_rx_sz, + struct sk_buff_head *tx_skbs) +{ + struct iwl_sap_hdr hdr; + struct net_device *netdev = + rcu_dereference_protected(iwl_mei_cache.netdev, + lockdep_is_held(&iwl_mei_mutex)); + + if (!netdev) + return; + + while (valid_rx_sz >= sizeof(hdr)) { + struct ethhdr *ethhdr; + unsigned char *data; + struct sk_buff *skb; + u16 len; + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr)); + valid_rx_sz -= sizeof(hdr); + len = le16_to_cpu(hdr.len); + + if (valid_rx_sz < len) { + dev_err(&cldev->dev, + "Data queue is corrupted: valid data len %zd, len %d\n", + valid_rx_sz, len); + break; + } + + if (len < sizeof(*ethhdr)) { + dev_err(&cldev->dev, + "Data len is smaller than an ethernet header? len = %d\n", + len); + } + + valid_rx_sz -= len; + + if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) { + dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n", + le16_to_cpu(hdr.type), len); + continue; + } + + /* We need enough room for the WiFi header + SNAP + IV */ + skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN); + if (!skb) + break; + + skb_reserve(skb, QOS_HDR_IV_SNAP_LEN); + ethhdr = skb_push(skb, sizeof(*ethhdr)); + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, + ethhdr, sizeof(*ethhdr)); + len -= sizeof(*ethhdr); + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->protocol = ethhdr->h_proto; + + data = skb_put(skb, len); + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len); + + /* + * Enqueue the skb here so that it can be sent later when we + * do not hold the mutex. TX'ing a packet with a mutex held is + * possible, but it wouldn't be nice to forbid the TX path to + * call any of iwlmei's functions, since every API from iwlmei + * needs the mutex.
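+ * The skbs queued here are drained outside the mutex by + * iwl_mei_handle_check_shared_area() (see below), roughly: + * + *	while (!skb_queue_empty(&tx_skbs)) { + *		struct sk_buff *skb = __skb_dequeue(&tx_skbs); + * + *		dev_queue_xmit(skb); + *	}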
+ */ + __skb_queue_tail(tx_skbs, skb); + } +} + +static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev, + const u8 *q_head, u32 q_sz, + u32 rd, u32 wr, ssize_t valid_rx_sz) +{ + struct page *p = alloc_page(GFP_KERNEL); + struct iwl_sap_hdr *hdr; + + if (!p) + return; + + hdr = page_address(p); + + while (valid_rx_sz >= sizeof(*hdr)) { + u16 len; + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr)); + valid_rx_sz -= sizeof(*hdr); + len = le16_to_cpu(hdr->len); + + if (valid_rx_sz < len) + break; + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len); + + trace_iwlmei_sap_cmd(hdr, false); + iwl_mei_handle_sap_msg(cldev, hdr); + valid_rx_sz -= len; + } + + /* valid_rx_sz must be 0 now... */ + if (valid_rx_sz) + dev_err(&cldev->dev, + "More data in the buffer although we read it all\n"); + + __free_page(p); +} + +static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev, + struct iwl_sap_q_ctrl_blk *notif_q, + const u8 *q_head, + struct sk_buff_head *skbs) +{ + u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); + u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); + u32 q_sz = le32_to_cpu(notif_q->size); + ssize_t valid_rx_sz; + + if (rd > q_sz || wr > q_sz) { + dev_err(&cldev->dev, + "Pointers are past the buffer limit\n"); + return; + } + + if (rd == wr) + return; + + valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr; + + if (skbs) + iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr, + valid_rx_sz, skbs); + else + iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr, + valid_rx_sz); + + /* Increment the read pointer to point to the write pointer */ + WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr)); +} + +static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_q_ctrl_blk *notif_q; + struct sk_buff_head tx_skbs; + struct iwl_sap_dir *dir; + void *q_head; + + if (!mei->shared_mem.ctrl) + return; + + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF]; + + /* + * Do not hold the mutex here, but rather each and every message + * handler takes it. + * This allows each message handler to take it exactly when needed. + */ + iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL); + + mutex_lock(&iwl_mei_mutex); + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA]; + + __skb_queue_head_init(&tx_skbs); + + iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs); + + if (skb_queue_empty(&tx_skbs)) { + mutex_unlock(&iwl_mei_mutex); + return; + } + + /* + * Take the RCU read lock before we unlock the mutex to make sure that + * even if the netdev is replaced by another non-NULL netdev right after + * we unlock the mutex, the old netdev will still be valid when we + * transmit the frames. We can't allow the netdev to be replaced here + * because the skbs hold a pointer to it.
+ */ + rcu_read_lock(); + + mutex_unlock(&iwl_mei_mutex); + + if (!rcu_access_pointer(iwl_mei_cache.netdev)) { + dev_err(&cldev->dev, "Can't Tx without a netdev\n"); + skb_queue_purge(&tx_skbs); + goto out; + } + + while (!skb_queue_empty(&tx_skbs)) { + struct sk_buff *skb = __skb_dequeue(&tx_skbs); + + trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR); + dev_queue_xmit(skb); + } + +out: + rcu_read_unlock(); +} + +static void iwl_mei_rx(struct mei_cl_device *cldev) +{ + struct iwl_sap_me_msg_hdr *hdr; + u8 msg[100]; + ssize_t ret; + + ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg)); + if (ret < 0) { + dev_err(&cldev->dev, "failed to receive data: %zd\n", ret); + return; + } + + if (ret == 0) { + dev_err(&cldev->dev, "got an empty response\n"); + return; + } + + hdr = (void *)msg; + trace_iwlmei_me_msg(hdr, false); + + switch (le32_to_cpu(hdr->type)) { + case SAP_ME_MSG_START_OK: + BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) > + sizeof(msg)); + + iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret); + break; + case SAP_ME_MSG_CHECK_SHARED_AREA: + iwl_mei_handle_check_shared_area(cldev); + break; + default: + dev_err(&cldev->dev, "got a RX notification: %d\n", + le32_to_cpu(hdr->type)); + break; + } +} + +static int iwl_mei_send_start(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_me_msg_start msg = { + .hdr.type = cpu_to_le32(SAP_ME_MSG_START), + .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), + .hdr.len = cpu_to_le32(sizeof(msg)), + .supported_versions[0] = SAP_VERSION, + .init_data_seq_num = cpu_to_le16(0x100), + .init_notif_seq_num = cpu_to_le16(0x800), + }; + int ret; + + trace_iwlmei_me_msg(&msg.hdr, true); + ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg)); + if (ret != sizeof(msg)) { + dev_err(&cldev->dev, + "failed to send the SAP_ME_MSG_START message %d\n", + ret); + return ret; + } + + return 0; +} + +static int iwl_mei_enable(struct mei_cl_device *cldev) +{ + int ret; + + ret = mei_cldev_enable(cldev); + if (ret < 0) { + dev_err(&cldev->dev, "failed to enable the device: %d\n", ret); + return ret; + } + + ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx); + if (ret) { + dev_err(&cldev->dev, + "failed to register to the rx cb: %d\n", ret); + mei_cldev_disable(cldev); + return ret; + } + + return 0; +} + +struct iwl_mei_nvm *iwl_mei_get_nvm(void) +{ + struct iwl_mei_nvm *nvm = NULL; + struct iwl_mei *mei; + int ret; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev, + SAP_MSG_NOTIF_GET_NVM); + if (ret) + goto out; + + mutex_unlock(&iwl_mei_mutex); + + ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ); + if (!ret) + return NULL; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (mei->nvm) + nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL); + +out: + mutex_unlock(&iwl_mei_mutex); + return nvm; +} +EXPORT_SYMBOL_GPL(iwl_mei_get_nvm); + +int iwl_mei_get_ownership(void) +{ + struct iwl_mei *mei; + int ret; + + mutex_lock(&iwl_mei_mutex); + + /* In case we didn't have a bind */ + if (!iwl_mei_is_connected()) { + ret = 0; + goto out; + } + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) { + ret = -ENODEV; + goto out; + } + + if (!mei->amt_enabled) { + ret = 0; + goto out; + } + + if 
(mei->got_ownership) { + ret = 0; + goto out; + } + + ret = iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP); + if (ret) + goto out; + + mutex_unlock(&iwl_mei_mutex); + + ret = wait_event_timeout(mei->get_ownership_wq, + mei->got_ownership, HZ / 2); + if (!ret) + return -ETIMEDOUT; + + mutex_lock(&iwl_mei_mutex); + + /* In case we didn't have a bind */ + if (!iwl_mei_is_connected()) { + ret = 0; + goto out; + } + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) { + ret = -ENODEV; + goto out; + } + + ret = !mei->got_ownership; + +out: + mutex_unlock(&iwl_mei_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(iwl_mei_get_ownership); + +void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, + const struct iwl_mei_colloc_info *colloc_info) +{ + struct iwl_sap_notif_host_link_up msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + .conn_info = { + .ssid_len = cpu_to_le32(conn_info->ssid_len), + .channel = conn_info->channel, + .band = conn_info->band, + .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher), + .auth_mode = cpu_to_le32(conn_info->auth_mode), + }, + }; + struct iwl_mei *mei; + + if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid)) + return; + + memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len); + memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN); + + if (colloc_info) { + msg.colloc_channel = colloc_info->channel; + msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1; + memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN); + } + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (!mei->amt_enabled) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + kfree(iwl_mei_cache.conn_info); + iwl_mei_cache.conn_info = + kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL); + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_host_associated); + +void iwl_mei_host_disassociated(void) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_host_link_down msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + .type = HOST_LINK_DOWN_TYPE_LONG, + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + kfree(iwl_mei_cache.conn_info); + iwl_mei_cache.conn_info = NULL; + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated); + +void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) +{ + struct iwl_mei *mei; + u32 rfkill_state = 0; + struct iwl_sap_msg_dw msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + }; + + if (!sw_rfkill) + rfkill_state |= SAP_SW_RFKILL_DEASSERTED; + + if (!hw_rfkill) + rfkill_state |= SAP_HW_RFKILL_DEASSERTED; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + msg.val = cpu_to_le32(rfkill_state); + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + iwl_mei_cache.rf_kill = rfkill_state; + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state); + +void iwl_mei_set_nic_info(const u8 *mac_address, 
const u8 *nvm_address) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_host_nic_info msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + ether_addr_copy(msg.mac_address, mac_address); + ether_addr_copy(msg.nvm_address, nvm_address); + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + ether_addr_copy(iwl_mei_cache.mac_address, mac_address); + ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address); + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info); + +void iwl_mei_set_country_code(u16 mcc) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_country_code msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + .mcc = cpu_to_le16(mcc), + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + iwl_mei_cache.mcc = mcc; + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_country_code); + +void iwl_mei_set_power_limit(const __le16 *power_limit) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_sar_limits msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table)); + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + kfree(iwl_mei_cache.power_limit); + iwl_mei_cache.power_limit = kmemdup(power_limit, + sizeof(msg.sar_chain_info_table), GFP_KERNEL); + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit); + +void iwl_mei_set_netdev(struct net_device *netdev) +{ + struct iwl_mei *mei; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) { + rcu_assign_pointer(iwl_mei_cache.netdev, netdev); + goto out; + } + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (!netdev) { + struct net_device *dev = + rcu_dereference_protected(iwl_mei_cache.netdev, + lockdep_is_held(&iwl_mei_mutex)); + + if (!dev) + goto out; + + netdev_rx_handler_unregister(dev); + } + + rcu_assign_pointer(iwl_mei_cache.netdev, netdev); + + if (netdev && mei->amt_enabled) + netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei); + +out: + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_netdev); + +void iwl_mei_device_down(void) +{ + struct iwl_mei *mei; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (!mei->csme_taking_ownership) + goto out; + + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED); + mei->csme_taking_ownership = false; +out: + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_device_down); + +int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops) +{ + int ret; + + /* + * We must have a non-NULL priv pointer to not crash when there are + * multiple WiFi devices. 
+ */ + if (!priv) + return -EINVAL; + + mutex_lock(&iwl_mei_mutex); + + /* do not allow registration if someone else already registered */ + if (iwl_mei_cache.priv || iwl_mei_cache.ops) { + ret = -EBUSY; + goto out; + } + + iwl_mei_cache.priv = priv; + iwl_mei_cache.ops = ops; + + if (iwl_mei_global_cldev) { + struct iwl_mei *mei = + mei_cldev_get_drvdata(iwl_mei_global_cldev); + + /* we already have a SAP connection */ + if (iwl_mei_is_connected()) + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_WIFIDR_UP); + } + ret = 0; + +out: + mutex_unlock(&iwl_mei_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(iwl_mei_register); + +void iwl_mei_start_unregister(void) +{ + mutex_lock(&iwl_mei_mutex); + + /* At this point, the wifi driver should have removed the netdev */ + if (rcu_access_pointer(iwl_mei_cache.netdev)) + pr_err("Still had a netdev pointer set upon unregister\n"); + + kfree(iwl_mei_cache.conn_info); + iwl_mei_cache.conn_info = NULL; + kfree(iwl_mei_cache.power_limit); + iwl_mei_cache.power_limit = NULL; + iwl_mei_cache.ops = NULL; + /* leave iwl_mei_cache.priv non-NULL to prevent any new registration */ + + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_start_unregister); + +void iwl_mei_unregister_complete(void) +{ + mutex_lock(&iwl_mei_mutex); + + iwl_mei_cache.priv = NULL; + + if (iwl_mei_global_cldev) { + struct iwl_mei *mei = + mei_cldev_get_drvdata(iwl_mei_global_cldev); + + iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN); + } + + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + +static ssize_t +iwl_mei_dbgfs_send_start_message_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int ret; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_global_cldev) { + ret = -ENODEV; + goto out; + } + + ret = iwl_mei_send_start(iwl_mei_global_cldev); + +out: + mutex_unlock(&iwl_mei_mutex); + return ret ?: count; +} + +static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = { + .write = iwl_mei_dbgfs_send_start_message_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + iwl_mei_get_ownership(); + + return count; +} + +static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = { + .write = iwl_mei_dbgfs_req_ownership_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static void iwl_mei_dbgfs_register(struct iwl_mei *mei) +{ + mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + if (!mei->dbgfs_dir) + return; + + debugfs_create_ulong("status", S_IRUSR, + mei->dbgfs_dir, &iwl_mei_status); + debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir, + mei, &iwl_mei_dbgfs_send_start_message_ops); + debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir, + mei, &iwl_mei_dbgfs_req_ownership_ops); +} + +static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) +{ + debugfs_remove_recursive(mei->dbgfs_dir); + mei->dbgfs_dir = NULL; +} + +#else + +static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {} +static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {} + +#endif /* CONFIG_DEBUG_FS */ + +/** + * iwl_mei_probe - the probe function called by the mei bus enumeration + * + * This allocates the data needed by iwlmei and sets a pointer to this data + * into the mei_cl_device's drvdata.
+ * It starts the SAP protocol by sending the SAP_ME_MSG_START without + waiting for the answer. The answer will be caught later by the Rx callback. + */ +static int iwl_mei_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct iwl_mei *mei; + int ret; + + mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL); + if (!mei) + return -ENOMEM; + + init_waitqueue_head(&mei->get_nvm_wq); + INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk); + INIT_DELAYED_WORK(&mei->csa_throttle_end_wk, + iwl_mei_csa_throttle_end_wk); + init_waitqueue_head(&mei->get_ownership_wq); + spin_lock_init(&mei->data_q_lock); + + mei_cldev_set_drvdata(cldev, mei); + mei->cldev = cldev; + + ret = iwl_mei_alloc_shared_mem(cldev); + if (ret) + goto free; + + iwl_mei_init_shared_mem(mei); + + ret = iwl_mei_enable(cldev); + if (ret) + goto free_shared_mem; + + iwl_mei_dbgfs_register(mei); + + /* + * We now have an Rx function in place; start the SAP protocol. + * We expect to get the SAP_ME_MSG_START_OK response later on. + */ + mutex_lock(&iwl_mei_mutex); + ret = iwl_mei_send_start(cldev); + mutex_unlock(&iwl_mei_mutex); + if (ret) + goto debugfs_unregister; + + /* must be last */ + iwl_mei_global_cldev = cldev; + + return 0; + +debugfs_unregister: + iwl_mei_dbgfs_unregister(mei); + mei_cldev_disable(cldev); +free_shared_mem: + iwl_mei_free_shared_mem(cldev); +free: + mei_cldev_set_drvdata(cldev, NULL); + devm_kfree(&cldev->dev, mei); + + return ret; +} + +#define SEND_SAP_MAX_WAIT_ITERATION 10 + +static void iwl_mei_remove(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + int i; + + /* + * We are being removed while the bus is active; it means we are + * going to suspend/shutdown, so the NIC will disappear. + */ + if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) + iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv); + + if (rcu_access_pointer(iwl_mei_cache.netdev)) { + struct net_device *dev; + + /* + * First take rtnl and only then the mutex to avoid an ABBA + * with iwl_mei_set_netdev() + */ + rtnl_lock(); + mutex_lock(&iwl_mei_mutex); + + /* + * If we are suspending and the wifi driver hasn't removed its netdev + * yet, do it now. In any case, don't change the cache.netdev pointer. + */ + dev = rcu_dereference_protected(iwl_mei_cache.netdev, + lockdep_is_held(&iwl_mei_mutex)); + + netdev_rx_handler_unregister(dev); + mutex_unlock(&iwl_mei_mutex); + rtnl_unlock(); + } + + mutex_lock(&iwl_mei_mutex); + + /* + * Tell CSME that we are going down so that it won't access the + * memory anymore; make sure this message goes through immediately. + */ + mei->csa_throttled = false; + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_HOST_GOES_DOWN); + + for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) { + if (!iwl_mei_host_to_me_data_pending(mei)) + break; + + msleep(5); + } + + /* + * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message, + * it means that it will probably keep reading memory that we are going + * to unmap and free; expect IOMMU error messages. + */ + if (i == SEND_SAP_MAX_WAIT_ITERATION) + dev_err(&mei->cldev->dev, + "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n"); + + mutex_unlock(&iwl_mei_mutex); + + /* + * This looks strange, but this lock is taken here to make sure that + * iwl_mei_add_data_to_ring called from the Tx path sees that we + * clear the IWL_MEI_STATUS_SAP_CONNECTED bit. + * Rx isn't a problem because the rx_handler can't be called after + * having been unregistered.
+ */ + spin_lock_bh(&mei->data_q_lock); + clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); + spin_unlock_bh(&mei->data_q_lock); + + if (iwl_mei_cache.ops) + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); + + /* + * mei_cldev_disable will return only after all the MEI Rx is done. + * It must be called when iwl_mei_mutex is *not* held, since it waits + * for our Rx handler to complete. + * After it returns, no new Rx will start. + */ + mei_cldev_disable(cldev); + + /* + * The netdev was already removed, and its removal includes a call to + * synchronize_net(), so we know there won't be any new Rx that will + * trigger the following workers. + */ + cancel_work_sync(&mei->send_csa_msg_wk); + cancel_delayed_work_sync(&mei->csa_throttle_end_wk); + + /* + * If someone is waiting for the ownership, let them know that we are + * going down and that we are not connected anymore. They'll be able to + * take the device. + */ + wake_up_all(&mei->get_ownership_wq); + + mutex_lock(&iwl_mei_mutex); + + iwl_mei_global_cldev = NULL; + + wake_up_all(&mei->get_nvm_wq); + + iwl_mei_free_shared_mem(cldev); + + iwl_mei_dbgfs_unregister(mei); + + mei_cldev_set_drvdata(cldev, NULL); + + kfree(mei->nvm); + + kfree(rcu_access_pointer(mei->filters)); + + devm_kfree(&cldev->dev, mei); + + mutex_unlock(&iwl_mei_mutex); +} + +static const struct mei_cl_device_id iwl_mei_tbl[] = { + { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY}, + + /* required last entry */ + { } +}; + +/* + * Do not export the device table because this module is loaded by + * iwlwifi's dependency. + */ + +static struct mei_cl_driver iwl_mei_cl_driver = { + .id_table = iwl_mei_tbl, + .name = KBUILD_MODNAME, + .probe = iwl_mei_probe, + .remove = iwl_mei_remove, +}; + +module_mei_cl_driver(iwl_mei_cl_driver); diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c new file mode 100644 index 000000000000..5f966af69720 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#include <uapi/linux/if_ether.h> +#include <uapi/linux/if_arp.h> +#include <uapi/linux/icmp.h> + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/ieee80211.h> + +#include <net/cfg80211.h> +#include <net/ip.h> + +#include <linux/if_arp.h> +#include <linux/icmp.h> +#include <linux/udp.h> +#include <linux/ip.h> +#include <linux/mm.h> + +#include "internal.h" +#include "sap.h" +#include "iwl-mei.h" + +/* + * Returns true if further filtering should be stopped. Only in that case + * pass_to_csme and rx_handler_res are set. Otherwise, the next level of + * filters should be checked.
+ */ +static bool iwl_mei_rx_filter_eth(const struct ethhdr *ethhdr, + const struct iwl_sap_oob_filters *filters, + bool *pass_to_csme, + rx_handler_result_t *rx_handler_res) +{ + const struct iwl_sap_eth_filter *filt; + + /* This filter is not relevant for UCAST packets */ + if (!is_multicast_ether_addr(ethhdr->h_dest) || + is_broadcast_ether_addr(ethhdr->h_dest)) + return false; + + for (filt = &filters->eth_filters[0]; + filt < &filters->eth_filters[0] + ARRAY_SIZE(filters->eth_filters); + filt++) { + /* Assume there are no enabled filters after a disabled one */ + if (!(filt->flags & SAP_ETH_FILTER_ENABLED)) + break; + + if (compare_ether_header(filt->mac_address, ethhdr->h_dest)) + continue; + + /* Packet needs to reach the host's stack */ + if (filt->flags & SAP_ETH_FILTER_COPY) + *rx_handler_res = RX_HANDLER_PASS; + else + *rx_handler_res = RX_HANDLER_CONSUMED; + + /* We have an authoritative answer, stop filtering */ + if (filt->flags & SAP_ETH_FILTER_STOP) { + *pass_to_csme = true; + return true; + } + + return false; + } + + /* MCAST frames that don't match layer 2 filters are not sent to ME */ + *pass_to_csme = false; + + return true; +} + +/* + * Returns true iff the frame should be passed to CSME, in which case + * rx_handler_res is set. + */ +static bool iwl_mei_rx_filter_arp(struct sk_buff *skb, + const struct iwl_sap_oob_filters *filters, + rx_handler_result_t *rx_handler_res) +{ + const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter; + const struct arphdr *arp; + const __be32 *target_ip; + u32 flags = le32_to_cpu(filt->flags); + + if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) + return false; + + arp = arp_hdr(skb); + + /* Handle only IPv4 over ethernet ARP frames */ + if (arp->ar_hrd != htons(ARPHRD_ETHER) || + arp->ar_pro != htons(ETH_P_IP)) + return false; + + /* + * After the ARP header, we have: + * src MAC address - 6 bytes + * src IP address - 4 bytes + * target MAC address - 6 bytes + */ + target_ip = (void *)((u8 *)(arp + 1) + + ETH_ALEN + sizeof(__be32) + ETH_ALEN); + + /* + * An ARP request is forwarded to ME only if the IP address matches + * the ARP request's target IP field. + */ + if (arp->ar_op == htons(ARPOP_REQUEST) && + (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ARP_REQ_PASS)) && + (filt->ipv4_addr == 0 || filt->ipv4_addr == *target_ip)) { + if (flags & SAP_IPV4_FILTER_ARP_REQ_COPY) + *rx_handler_res = RX_HANDLER_PASS; + else + *rx_handler_res = RX_HANDLER_CONSUMED; + + return true; + } + + /* ARP reply is always forwarded to ME regardless of the IP */ + if (flags & SAP_IPV4_FILTER_ARP_RESP_PASS && + arp->ar_op == htons(ARPOP_REPLY)) { + if (flags & SAP_IPV4_FILTER_ARP_RESP_COPY) + *rx_handler_res = RX_HANDLER_PASS; + else + *rx_handler_res = RX_HANDLER_CONSUMED; + + return true; + } + + return false; +} + +static bool +iwl_mei_rx_filter_tcp_udp(struct sk_buff *skb, bool ip_match, + const struct iwl_sap_oob_filters *filters, + rx_handler_result_t *rx_handler_res) +{ + const struct iwl_sap_flex_filter *filt; + + for (filt = &filters->flex_filters[0]; + filt < &filters->flex_filters[0] + ARRAY_SIZE(filters->flex_filters); + filt++) { + if (!(filt->flags & SAP_FLEX_FILTER_ENABLED)) + break; + + /* + * The filter requires a match on the IP level and we didn't + * have such a match.
+ */ + if ((filt->flags & + (SAP_FLEX_FILTER_IPV4 | SAP_FLEX_FILTER_IPV6)) && + !ip_match) + continue; + + if ((filt->flags & SAP_FLEX_FILTER_UDP) && + ip_hdr(skb)->protocol != IPPROTO_UDP) + continue; + + if ((filt->flags & SAP_FLEX_FILTER_TCP) && + ip_hdr(skb)->protocol != IPPROTO_TCP) + continue; + + /* + * We must have either a TCP header or a UDP header; both + * start with a source port and then a destination port. + * Both are big endian words. + * Use a UDP header and that will work for TCP as well. + */ + if ((filt->src_port && filt->src_port != udp_hdr(skb)->source) || + (filt->dst_port && filt->dst_port != udp_hdr(skb)->dest)) + continue; + + if (filt->flags & SAP_FLEX_FILTER_COPY) + *rx_handler_res = RX_HANDLER_PASS; + else + *rx_handler_res = RX_HANDLER_CONSUMED; + + return true; + } + + return false; +} + +static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb, + const struct iwl_sap_oob_filters *filters, + rx_handler_result_t *rx_handler_res) +{ + const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter; + const struct iphdr *iphdr; + unsigned int iphdrlen; + bool match; + + /* pull the base header first, then the full header including options */ + if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) || + !pskb_may_pull(skb, skb_network_offset(skb) + + ip_hdrlen(skb))) + return false; + + iphdrlen = ip_hdrlen(skb); + iphdr = ip_hdr(skb); + match = !filters->ipv4_filter.ipv4_addr || + filters->ipv4_filter.ipv4_addr == iphdr->daddr; + + skb_set_transport_header(skb, skb_network_offset(skb) + iphdrlen); + + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + case IPPROTO_TCP: + /* + * A UDP header is shorter than a TCP header, and we look at + * the first bytes of the header anyway (see below). + * If we have a truncated TCP packet, let CSME handle this. + */ + if (!pskb_may_pull(skb, skb_transport_offset(skb) + + sizeof(struct udphdr))) + return false; + + return iwl_mei_rx_filter_tcp_udp(skb, match, + filters, rx_handler_res); + + case IPPROTO_ICMP: { + struct icmphdr *icmp; + + if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(*icmp))) + return false; + + icmp = icmp_hdr(skb); + + /* + * Don't pass echo requests to ME even if it wants them, as we + * want the host to answer. + */ + if ((filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_PASS)) && + match && (icmp->type != ICMP_ECHO || icmp->code != 0)) { + if (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_COPY)) + *rx_handler_res = RX_HANDLER_PASS; + else + *rx_handler_res = RX_HANDLER_CONSUMED; + + return true; + } + break; + } + case IPPROTO_ICMPV6: + /* TODO: Should we have the same ICMP request logic here too?
*/ + if ((filters->icmpv6_flags & cpu_to_le32(SAP_ICMPV6_FILTER_ENABLED) && + match)) { + if (filters->icmpv6_flags & + cpu_to_le32(SAP_ICMPV6_FILTER_COPY)) + *rx_handler_res = RX_HANDLER_PASS; + else + *rx_handler_res = RX_HANDLER_CONSUMED; + + return true; + } + break; + default: + return false; + } + + return false; +} + +static bool iwl_mei_rx_filter_ipv6(struct sk_buff *skb, + const struct iwl_sap_oob_filters *filters, + rx_handler_result_t *rx_handler_res) +{ + *rx_handler_res = RX_HANDLER_PASS; + + /* TODO */ + + return false; +} + +static rx_handler_result_t +iwl_mei_rx_pass_to_csme(struct sk_buff *skb, + const struct iwl_sap_oob_filters *filters, + bool *pass_to_csme) +{ + const struct ethhdr *ethhdr = (void *)skb_mac_header(skb); + rx_handler_result_t rx_handler_res = RX_HANDLER_PASS; + bool (*filt_handler)(struct sk_buff *skb, + const struct iwl_sap_oob_filters *filters, + rx_handler_result_t *rx_handler_res); + + /* + * skb->data points to the IP header / ARP header and the ETH header + * is in the headroom. + */ + skb_reset_network_header(skb); + + /* + * MCAST IP packets sent by us are received again here without + * an ETH header. Drop them here. + */ + if (!skb_mac_offset(skb)) + return RX_HANDLER_PASS; + + if (skb_headroom(skb) < sizeof(*ethhdr)) + return RX_HANDLER_PASS; + + if (iwl_mei_rx_filter_eth(ethhdr, filters, + pass_to_csme, &rx_handler_res)) + return rx_handler_res; + + switch (skb->protocol) { + case htons(ETH_P_IP): + filt_handler = iwl_mei_rx_filter_ipv4; + break; + case htons(ETH_P_ARP): + filt_handler = iwl_mei_rx_filter_arp; + break; + case htons(ETH_P_IPV6): + filt_handler = iwl_mei_rx_filter_ipv6; + break; + default: + *pass_to_csme = false; + return rx_handler_res; + } + + *pass_to_csme = filt_handler(skb, filters, &rx_handler_res); + + return rx_handler_res; +} + +rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb, + const struct iwl_sap_oob_filters *filters, + bool *pass_to_csme) +{ + rx_handler_result_t ret; + struct sk_buff *skb; + + ret = iwl_mei_rx_pass_to_csme(orig_skb, filters, pass_to_csme); + + if (!*pass_to_csme) + return RX_HANDLER_PASS; + + if (ret == RX_HANDLER_PASS) + skb = skb_copy(orig_skb, GFP_ATOMIC); + else + skb = orig_skb; + + /* CSME wants the MAC header as well, push it back */ + skb_push(skb, skb->data - skb_mac_header(skb)); + + /* + * Add the packet that CSME wants to get to the ring. Don't send the + * Check Shared Area HECI message since this is not possible from the + * Rx context. The caller will schedule a worker to do just that. + */ + iwl_mei_add_data_to_ring(skb, false); + + /* + * In case we drop the packet, don't free it; the caller will do that + * for us. + */ + if (ret == RX_HANDLER_PASS) + dev_kfree_skb(skb); + + return ret; +} + +#define DHCP_SERVER_PORT 67 +#define DHCP_CLIENT_PORT 68 +void iwl_mei_tx_copy_to_csme(struct sk_buff *origskb, unsigned int ivlen) +{ + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + struct ethhdr ethhdr; + struct ethhdr *eth; + + /* Catch DHCP packets */ + if (origskb->protocol != htons(ETH_P_IP) || + ip_hdr(origskb)->protocol != IPPROTO_UDP || + udp_hdr(origskb)->source != htons(DHCP_CLIENT_PORT) || + udp_hdr(origskb)->dest != htons(DHCP_SERVER_PORT)) + return; + + /* + * We could be a bit less aggressive here and not copy everything, but + * this is very rare anyway, so don't bother much.
+ */ + skb = skb_copy(origskb, GFP_ATOMIC); + if (!skb) + return; + + skb->protocol = origskb->protocol; + + hdr = (void *)skb->data; + + memcpy(ethhdr.h_dest, ieee80211_get_DA(hdr), ETH_ALEN); + memcpy(ethhdr.h_source, ieee80211_get_SA(hdr), ETH_ALEN); + + /* + * Remove the ieee80211 header + IV + SNAP but leave the ethertype. + * We still have enough headroom for the SAP header. + */ + pskb_pull(skb, ieee80211_hdrlen(hdr->frame_control) + ivlen + 6); + eth = skb_push(skb, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source)); + memcpy(eth, &ethhdr, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source)); + + iwl_mei_add_data_to_ring(skb, true); + + dev_kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme); diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h new file mode 100644 index 000000000000..11e3009121cc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h @@ -0,0 +1,733 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#ifndef __sap_h__ +#define __sap_h__ + +#include "mei/iwl-mei.h" + +/** + * DOC: Introduction + * + * SAP is the protocol used by the Intel Wireless driver (iwlwifi) + * and the wireless driver implemented in the CSME firmware. + * It allows them to do several things: + * 1) Decide who is the owner of the device: CSME or the host + * 2) When the host is the owner of the device, CSME can still + * send and receive packets through iwlwifi. + * + * The protocol uses the ME interface (mei driver) to send + * messages to the CSME firmware. Those messages have a header + * &struct iwl_sap_me_msg_hdr and this header is followed + * by a payload. + * + * Since this messaging system cannot support high amounts of + * traffic, iwlwifi and the CSME firmware's WLAN driver have an + * additional communication pipe to exchange information. The body + * of the message is copied to a shared area and the message that + * goes over the ME interface just signals the other side + * that a new message is waiting in the shared area. The ME + * interface is used only for signaling and not to transfer + * the payload. + * + * This shared area of memory is DMA'able and mapped to be + * writable by both the CSME firmware and iwlwifi. It is + * mapped into the address space of the device that controls the ME + * interface's DMA engine. Any data that iwlwifi needs to + * send to the CSME firmware needs to be copied there. + */ + +/** + * DOC: Initial Handshake + * + * Once we get a link to the CSME's WLAN driver, we start the handshake + * to establish the shared memory that will allow the communication between + * the CSME's WLAN driver and the host. + * + * 1) Host sends %SAP_ME_MSG_START message with the physical address + * of the shared area. + * 2) CSME replies with %SAP_ME_MSG_START_OK which includes the + * protocol versions supported by CSME. + */ + +/** + * DOC: Host and driver state messages + * + * In order to let CSME know about the host state and the host driver state, + * the host sends the following messages. + * When the host driver is loaded, the host sends %SAP_MSG_NOTIF_WIFIDR_UP. + * When the host driver is unloaded, the host sends %SAP_MSG_NOTIF_WIFIDR_DOWN. + * When iwlmei is unloaded, %SAP_MSG_NOTIF_HOST_GOES_DOWN is sent to let + * CSME know not to access the shared memory anymore since it'll be freed.
+ * + * CSME will reply to SAP_MSG_NOTIF_WIFIDR_UP by + * %SAP_MSG_NOTIF_AMT_STATE to let the host driver know whether CSME can use + * the WiFi device or not, followed by %SAP_MSG_NOTIF_CSME_CONN_STATUS to + * inform the host driver of the connection state of CSME. + * + * When the host is associated to an AP, it must send + * %SAP_MSG_NOTIF_HOST_LINK_UP and when it disconnects from the AP, it must + * send %SAP_MSG_NOTIF_HOST_LINK_DOWN. + */ + +/** + * DOC: Ownership + * + * The device can be controlled either by the CSME firmware or + * by the host driver: iwlwifi. There is a negotiation between + * those two entities to determine who controls (or owns) the + * device. Since the CSME can control the device even when the + * OS is not working or even missing, the CSME can request the + * device if it comes to the conclusion that the OS's host driver + * is not operational. This is why the host driver needs to + * signal CSME that it is up and running. If the driver is + * unloaded, it'll signal CSME that it is going down so that + * CSME can take ownership. + */ + +/** + * DOC: Ownership transfer + * + * When the host driver needs the device, it'll send the + * %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP that will be replied to by + * %SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ which will let the + * host know whether the ownership is granted or not. If the ownership is + * granted, the host sends %SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED. + * + * When CSME requests ownership, it'll send the + * %SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP and gives the host some time to stop + * accessing the device. The host needs to send + * %SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED to confirm that it won't access + * the device anymore. If the host fails to send this message fast enough, + * CSME will take ownership of the device anyway. + * When CSME is willing to release the ownership, it'll send + * %SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP. + */ + +/** + * DOC: Data messages + * + * Data messages must be sent and received on a separate queue in the shared + * memory. Almost all the data messages use the %SAP_MSG_DATA_PACKET message, + * both for packets sent by CSME to the host to be sent to the AP and for + * packets received from the AP and sent by the host to CSME. + * CSME sends filters to the host to let the host know which inbound packets + * it must send to CSME. Those filters are received by the host as a + * %SAP_MSG_NOTIF_CSME_FILTERS command. + * The only outbound packets that must be sent to CSME are the DHCP packets. + * Those packets must use the %SAP_MSG_CB_DATA_PACKET message. + */ + +/** + * enum iwl_sap_me_msg_id - the ID of the ME message + * @SAP_ME_MSG_START: See &struct iwl_sap_me_msg_start. + * @SAP_ME_MSG_START_OK: See &struct iwl_sap_me_msg_start_ok. + * @SAP_ME_MSG_CHECK_SHARED_AREA: This message has no payload. + */ +enum iwl_sap_me_msg_id { + SAP_ME_MSG_START = 1, + SAP_ME_MSG_START_OK, + SAP_ME_MSG_CHECK_SHARED_AREA, +}; + +/** + * struct iwl_sap_me_msg_hdr - the header of the ME message + * @type: the type of the message, see &enum iwl_sap_me_msg_id. + * @seq_num: a sequence number used for debug only. + * @len: the length of the message. + */ +struct iwl_sap_me_msg_hdr { + __le32 type; + __le32 seq_num; + __le32 len; +} __packed; + +/** + * struct iwl_sap_me_msg_start - used for the %SAP_ME_MSG_START message + * @hdr: See &struct iwl_sap_me_msg_hdr. + * @shared_mem: physical address of SAP shared memory area. + * @init_data_seq_num: seq_num of the first data packet HOST -> CSME.
+ * @init_notif_seq_num: seq_num of the first notification HOST -> CSME. + * @supported_versions: The host sends to the CSME a zero-terminated array + * of the versions it supports. + * + * This message is sent by the host to CSME and will be responded to by the + * %SAP_ME_MSG_START_OK message. + */ +struct iwl_sap_me_msg_start { + struct iwl_sap_me_msg_hdr hdr; + __le64 shared_mem; + __le16 init_data_seq_num; + __le16 init_notif_seq_num; + u8 supported_versions[64]; +} __packed; + +/** + * struct iwl_sap_me_msg_start_ok - used for the %SAP_ME_MSG_START_OK + * @hdr: See &struct iwl_sap_me_msg_hdr + * @init_data_seq_num: Not used. + * @init_notif_seq_num: Not used. + * @supported_version: The version that will be used. + * @reserved: For alignment. + * + * This message is sent by CSME to the host in response to the + * %SAP_ME_MSG_START message. + */ +struct iwl_sap_me_msg_start_ok { + struct iwl_sap_me_msg_hdr hdr; + __le16 init_data_seq_num; + __le16 init_notif_seq_num; + u8 supported_version; + u8 reserved[3]; +} __packed; + +/** + * enum iwl_sap_msg - SAP messages + * @SAP_MSG_NOTIF_BOTH_WAYS_MIN: Not used. + * @SAP_MSG_NOTIF_PING: No payload. Solicits a response message (check-alive). + * @SAP_MSG_NOTIF_PONG: No payload. The response message. + * @SAP_MSG_NOTIF_BOTH_WAYS_MAX: Not used. + * + * @SAP_MSG_NOTIF_FROM_CSME_MIN: Not used. + * @SAP_MSG_NOTIF_CSME_FILTERS: TODO + * @SAP_MSG_NOTIF_AMT_STATE: Payload is a DW. Any non-zero value means + * that CSME is enabled. + * @SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ: Payload is a DW. 0 means + * the host will not get ownership. Any other value means the host is + * the owner. + * @SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP: No payload. + * @SAP_MSG_NOTIF_TRIGGER_IP_REFRESH: No payload. + * @SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP: No payload. + * @SAP_MSG_NOTIF_NIC_OWNER: Payload is a DW. See &enum iwl_sap_nic_owner. + * @SAP_MSG_NOTIF_CSME_CONN_STATUS: See &struct iwl_sap_notif_conn_status. + * @SAP_MSG_NOTIF_NVM: See &struct iwl_sap_nvm. + * @SAP_MSG_NOTIF_FROM_CSME_MAX: Not used. + * + * @SAP_MSG_NOTIF_FROM_HOST_MIN: Not used. + * @SAP_MSG_NOTIF_BAND_SELECTION: TODO + * @SAP_MSG_NOTIF_RADIO_STATE: Payload is a DW. + * See &enum iwl_sap_radio_state_bitmap. + * @SAP_MSG_NOTIF_NIC_INFO: See &struct iwl_sap_notif_host_nic_info. + * @SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP: No payload. + * @SAP_MSG_NOTIF_HOST_SUSPENDS: Payload is a DW. Bitmap described in + * &enum iwl_sap_notif_host_suspends_bitmap. + * @SAP_MSG_NOTIF_HOST_RESUMES: Payload is a DW. 0 or 1. 1 says that + * the CSME should re-initialize the init control block. + * @SAP_MSG_NOTIF_HOST_GOES_DOWN: No payload. + * @SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED: No payload. + * @SAP_MSG_NOTIF_COUNTRY_CODE: See &struct iwl_sap_notif_country_code. + * @SAP_MSG_NOTIF_HOST_LINK_UP: See &struct iwl_sap_notif_host_link_up. + * @SAP_MSG_NOTIF_HOST_LINK_DOWN: See &struct iwl_sap_notif_host_link_down. + * @SAP_MSG_NOTIF_WHO_OWNS_NIC: No payload. + * @SAP_MSG_NOTIF_WIFIDR_DOWN: No payload. + * @SAP_MSG_NOTIF_WIFIDR_UP: No payload. + * @SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED: No payload. + * @SAP_MSG_NOTIF_SAR_LIMITS: See &struct iwl_sap_notif_sar_limits. + * @SAP_MSG_NOTIF_GET_NVM: No payload. Triggers %SAP_MSG_NOTIF_NVM. + * @SAP_MSG_NOTIF_FROM_HOST_MAX: Not used. + * + * @SAP_MSG_DATA_MIN: Not used. + * @SAP_MSG_DATA_PACKET: Packets that passed the filters defined by + * %SAP_MSG_NOTIF_CSME_FILTERS. The payload is &struct iwl_sap_hdr with + * the payload of the packet immediately afterwards.
+ * @SAP_MSG_CB_DATA_PACKET: Indicates to CSME that we transmitted a specific + * packet. Used only for DHCP transmitted packets. See + * &struct iwl_sap_cb_data. + * @SAP_MSG_DATA_MAX: Not used. + */ +enum iwl_sap_msg { + SAP_MSG_NOTIF_BOTH_WAYS_MIN = 0, + SAP_MSG_NOTIF_PING = 1, + SAP_MSG_NOTIF_PONG = 2, + SAP_MSG_NOTIF_BOTH_WAYS_MAX, + + SAP_MSG_NOTIF_FROM_CSME_MIN = 500, + SAP_MSG_NOTIF_CSME_FILTERS = SAP_MSG_NOTIF_FROM_CSME_MIN, + /* 501 is deprecated */ + SAP_MSG_NOTIF_AMT_STATE = 502, + SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ = 503, + SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP = 504, + SAP_MSG_NOTIF_TRIGGER_IP_REFRESH = 505, + SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP = 506, + /* 507 is deprecated */ + /* 508 is deprecated */ + /* 509 is deprecated */ + /* 510 is deprecated */ + SAP_MSG_NOTIF_NIC_OWNER = 511, + SAP_MSG_NOTIF_CSME_CONN_STATUS = 512, + SAP_MSG_NOTIF_NVM = 513, + SAP_MSG_NOTIF_FROM_CSME_MAX, + + SAP_MSG_NOTIF_FROM_HOST_MIN = 1000, + SAP_MSG_NOTIF_BAND_SELECTION = SAP_MSG_NOTIF_FROM_HOST_MIN, + SAP_MSG_NOTIF_RADIO_STATE = 1001, + SAP_MSG_NOTIF_NIC_INFO = 1002, + SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP = 1003, + SAP_MSG_NOTIF_HOST_SUSPENDS = 1004, + SAP_MSG_NOTIF_HOST_RESUMES = 1005, + SAP_MSG_NOTIF_HOST_GOES_DOWN = 1006, + SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED = 1007, + SAP_MSG_NOTIF_COUNTRY_CODE = 1008, + SAP_MSG_NOTIF_HOST_LINK_UP = 1009, + SAP_MSG_NOTIF_HOST_LINK_DOWN = 1010, + SAP_MSG_NOTIF_WHO_OWNS_NIC = 1011, + SAP_MSG_NOTIF_WIFIDR_DOWN = 1012, + SAP_MSG_NOTIF_WIFIDR_UP = 1013, + /* 1014 is deprecated */ + SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED = 1015, + SAP_MSG_NOTIF_SAR_LIMITS = 1016, + SAP_MSG_NOTIF_GET_NVM = 1017, + SAP_MSG_NOTIF_FROM_HOST_MAX, + + SAP_MSG_DATA_MIN = 2000, + SAP_MSG_DATA_PACKET = SAP_MSG_DATA_MIN, + SAP_MSG_CB_DATA_PACKET = 2001, + SAP_MSG_DATA_MAX, +}; + +/** + * struct iwl_sap_hdr - prefixes any SAP message + * @type: See &enum iwl_sap_msg. + * @len: The length of the message (header not included). + * @seq_num: For debug. + * @payload: The payload of the message. + */ +struct iwl_sap_hdr { + __le16 type; + __le16 len; + __le32 seq_num; + u8 payload[0]; +}; + +/** + * struct iwl_sap_msg_dw - suits any DW long SAP message + * @hdr: The SAP header + * @val: The value of the DW. + */ +struct iwl_sap_msg_dw { + struct iwl_sap_hdr hdr; + __le32 val; +}; + +/** + * enum iwl_sap_nic_owner - used by %SAP_MSG_NOTIF_NIC_OWNER + * @SAP_NIC_OWNER_UNKNOWN: Not used. + * @SAP_NIC_OWNER_HOST: The host owns the NIC. + * @SAP_NIC_OWNER_ME: CSME owns the NIC. + */ +enum iwl_sap_nic_owner { + SAP_NIC_OWNER_UNKNOWN, + SAP_NIC_OWNER_HOST, + SAP_NIC_OWNER_ME, +}; + +enum iwl_sap_wifi_auth_type { + SAP_WIFI_AUTH_TYPE_OPEN = IWL_MEI_AKM_AUTH_OPEN, + SAP_WIFI_AUTH_TYPE_RSNA = IWL_MEI_AKM_AUTH_RSNA, + SAP_WIFI_AUTH_TYPE_RSNA_PSK = IWL_MEI_AKM_AUTH_RSNA_PSK, + SAP_WIFI_AUTH_TYPE_SAE = IWL_MEI_AKM_AUTH_SAE, + SAP_WIFI_AUTH_TYPE_MAX, +}; + +/** + * enum iwl_sap_wifi_cipher_alg + * @SAP_WIFI_CIPHER_ALG_NONE: TBD + * @SAP_WIFI_CIPHER_ALG_CCMP: TBD + * @SAP_WIFI_CIPHER_ALG_GCMP: TBD + * @SAP_WIFI_CIPHER_ALG_GCMP_256: TBD + */ +enum iwl_sap_wifi_cipher_alg { + SAP_WIFI_CIPHER_ALG_NONE = IWL_MEI_CIPHER_NONE, + SAP_WIFI_CIPHER_ALG_CCMP = IWL_MEI_CIPHER_CCMP, + SAP_WIFI_CIPHER_ALG_GCMP = IWL_MEI_CIPHER_GCMP, + SAP_WIFI_CIPHER_ALG_GCMP_256 = IWL_MEI_CIPHER_GCMP_256, +}; + +/** + * struct iwl_sap_notif_connection_info - nested in other structures + * @ssid_len: The length of the SSID. + * @ssid: The SSID. + * @auth_mode: The authentication mode. 
See &enum iwl_sap_wifi_auth_type. + * @pairwise_cipher: The cipher used for unicast packets. + * See &enum iwl_sap_wifi_cipher_alg. + * @channel: The channel on which we are associated. + * @band: The band on which we are associated. + * @reserved: For alignment. + * @bssid: The BSSID. + * @reserved1: For alignment. + */ +struct iwl_sap_notif_connection_info { + __le32 ssid_len; + u8 ssid[32]; + __le32 auth_mode; + __le32 pairwise_cipher; + u8 channel; + u8 band; + __le16 reserved; + u8 bssid[6]; + __le16 reserved1; +} __packed; + +/** + * enum iwl_sap_scan_request - for the scan_request field + * @SCAN_REQUEST_FILTERING: Filtering is requested. + * @SCAN_REQUEST_FAST: Fast scan is requested. + */ +enum iwl_sap_scan_request { + SCAN_REQUEST_FILTERING = 1 << 0, + SCAN_REQUEST_FAST = 1 << 1, +}; + +/** + * struct iwl_sap_notif_conn_status - payload of %SAP_MSG_NOTIF_CSME_CONN_STATUS + * @hdr: The SAP header + * @link_prot_state: Non-zero if link protection is active. + * @scan_request: See &enum iwl_sap_scan_request. + * @conn_info: Information about the connection. + */ +struct iwl_sap_notif_conn_status { + struct iwl_sap_hdr hdr; + __le32 link_prot_state; + __le32 scan_request; + struct iwl_sap_notif_connection_info conn_info; +} __packed; + +/** + * enum iwl_sap_radio_state_bitmap - used for %SAP_MSG_NOTIF_RADIO_STATE + * @SAP_SW_RFKILL_DEASSERTED: If set, SW RfKill is de-asserted + * @SAP_HW_RFKILL_DEASSERTED: If set, HW RfKill is de-asserted + * + * If both bits are set, then the radio is on. + */ +enum iwl_sap_radio_state_bitmap { + SAP_SW_RFKILL_DEASSERTED = 1 << 0, + SAP_HW_RFKILL_DEASSERTED = 1 << 1, +}; + +/** + * enum iwl_sap_notif_host_suspends_bitmap - used for %SAP_MSG_NOTIF_HOST_SUSPENDS + * @SAP_OFFER_NIC: TBD + * @SAP_FILTER_CONFIGURED: TBD + * @SAP_NLO_CONFIGURED: TBD + * @SAP_HOST_OWNS_NIC: TBD + * @SAP_LINK_PROTECTED: TBD + */ +enum iwl_sap_notif_host_suspends_bitmap { + SAP_OFFER_NIC = 1 << 0, + SAP_FILTER_CONFIGURED = 1 << 1, + SAP_NLO_CONFIGURED = 1 << 2, + SAP_HOST_OWNS_NIC = 1 << 3, + SAP_LINK_PROTECTED = 1 << 4, +}; + +/** + * struct iwl_sap_notif_country_code - payload of %SAP_MSG_NOTIF_COUNTRY_CODE + * @hdr: The SAP header + * @mcc: The country code. + * @source_id: TBD + * @reserved: For alignment. + * @diff_time: TBD + */ +struct iwl_sap_notif_country_code { + struct iwl_sap_hdr hdr; + __le16 mcc; + u8 source_id; + u8 reserved; + __le32 diff_time; +} __packed; + +/** + * struct iwl_sap_notif_host_link_up - payload of %SAP_MSG_NOTIF_HOST_LINK_UP + * @hdr: The SAP header + * @conn_info: Information about the connection. + * @colloc_channel: The collocated channel + * @colloc_band: The band of the collocated channel. + * @reserved: For alignment. + * @colloc_bssid: The collocated BSSID. + * @reserved1: For alignment. + */ +struct iwl_sap_notif_host_link_up { + struct iwl_sap_hdr hdr; + struct iwl_sap_notif_connection_info conn_info; + u8 colloc_channel; + u8 colloc_band; + __le16 reserved; + u8 colloc_bssid[6]; + __le16 reserved1; +} __packed; + +/** + * enum iwl_sap_notif_link_down_type - used in &struct iwl_sap_notif_host_link_down + * @HOST_LINK_DOWN_TYPE_NONE: TBD + * @HOST_LINK_DOWN_TYPE_TEMPORARY: TBD + * @HOST_LINK_DOWN_TYPE_LONG: TBD + */ +enum iwl_sap_notif_link_down_type { + HOST_LINK_DOWN_TYPE_NONE, + HOST_LINK_DOWN_TYPE_TEMPORARY, + HOST_LINK_DOWN_TYPE_LONG, +}; + +/** + * struct iwl_sap_notif_host_link_down - payload for %SAP_MSG_NOTIF_HOST_LINK_DOWN + * @hdr: The SAP header + * @type: See &enum iwl_sap_notif_link_down_type. 
+ * @reserved: For alignment. + * @reason_valid: If 0, ignore the next field. + * @reason: The reason of the disconnection. + */ +struct iwl_sap_notif_host_link_down { + struct iwl_sap_hdr hdr; + u8 type; + u8 reserved[2]; + u8 reason_valid; + __le32 reason; +} __packed; + +/** + * struct iwl_sap_notif_host_nic_info - payload for %SAP_MSG_NOTIF_NIC_INFO + * @hdr: The SAP header + * @mac_address: The MAC address as configured to the interface. + * @nvm_address: The MAC address as configured in the NVM. + */ +struct iwl_sap_notif_host_nic_info { + struct iwl_sap_hdr hdr; + u8 mac_address[6]; + u8 nvm_address[6]; +} __packed; + +/** + * struct iwl_sap_notif_dw - payload is a dw + * @hdr: The SAP header. + * @dw: The payload. + */ +struct iwl_sap_notif_dw { + struct iwl_sap_hdr hdr; + __le32 dw; +} __packed; + +/** + * struct iwl_sap_notif_sar_limits - payload for %SAP_MSG_NOTIF_SAR_LIMITS + * @hdr: The SAP header + * @sar_chain_info_table: Tx power limits. + */ +struct iwl_sap_notif_sar_limits { + struct iwl_sap_hdr hdr; + __le16 sar_chain_info_table[2][5]; +} __packed; + +/** + * enum iwl_sap_nvm_caps - capabilities for NVM SAP + * @SAP_NVM_CAPS_LARI_SUPPORT: Lari is supported + * @SAP_NVM_CAPS_11AX_SUPPORT: 11AX is supported + */ +enum iwl_sap_nvm_caps { + SAP_NVM_CAPS_LARI_SUPPORT = BIT(0), + SAP_NVM_CAPS_11AX_SUPPORT = BIT(1), +}; + +/** + * struct iwl_sap_nvm - payload for %SAP_MSG_NOTIF_NVM + * @hdr: The SAP header. + * @hw_addr: The MAC address + * @n_hw_addrs: The number of MAC addresses + * @reserved: For alignment. + * @radio_cfg: The radio configuration. + * @caps: See &enum iwl_sap_nvm_caps. + * @nvm_version: The version of the NVM. + * @channels: The data for each channel. + */ +struct iwl_sap_nvm { + struct iwl_sap_hdr hdr; + u8 hw_addr[6]; + u8 n_hw_addrs; + u8 reserved; + __le32 radio_cfg; + __le32 caps; + __le32 nvm_version; + __le32 channels[110]; +} __packed; + +/** + * enum iwl_sap_eth_filter_flags - used in &struct iwl_sap_eth_filter + * @SAP_ETH_FILTER_STOP: Do not process further filters. + * @SAP_ETH_FILTER_COPY: Copy the packet to the CSME. + * @SAP_ETH_FILTER_ENABLED: If false, the filter should be ignored. + */ +enum iwl_sap_eth_filter_flags { + SAP_ETH_FILTER_STOP = BIT(0), + SAP_ETH_FILTER_COPY = BIT(1), + SAP_ETH_FILTER_ENABLED = BIT(2), +}; + +/** + * struct iwl_sap_eth_filter - a L2 filter + * @mac_address: Address to filter. + * @flags: See &enum iwl_sap_eth_filter_flags. + */ +struct iwl_sap_eth_filter { + u8 mac_address[6]; + u8 flags; +} __packed; + +/** + * enum iwl_sap_flex_filter_flags - used in &struct iwl_sap_flex_filter + * @SAP_FLEX_FILTER_COPY: Pass UDP / TCP packets to CSME. + * @SAP_FLEX_FILTER_ENABLED: If false, the filter should be ignored. + * @SAP_FLEX_FILTER_IPV4: Filter requires match on the IP address as well. + * @SAP_FLEX_FILTER_IPV6: Filter requires match on the IP address as well. + * @SAP_FLEX_FILTER_TCP: Filter should be applied on TCP packets. + * @SAP_FLEX_FILTER_UDP: Filter should be applied on UDP packets. + */ +enum iwl_sap_flex_filter_flags { + SAP_FLEX_FILTER_COPY = BIT(0), + SAP_FLEX_FILTER_ENABLED = BIT(1), + SAP_FLEX_FILTER_IPV6 = BIT(2), + SAP_FLEX_FILTER_IPV4 = BIT(3), + SAP_FLEX_FILTER_TCP = BIT(4), + SAP_FLEX_FILTER_UDP = BIT(5), +}; + +/** + * struct iwl_sap_flex_filter - + * @src_port: Source port in network format. + * @dst_port: Destination port in network format. + * @flags: Flags and protocol, see &enum iwl_sap_flex_filter_flags. + * @reserved: For alignment. 
+ */ +struct iwl_sap_flex_filter { + __be16 src_port; + __be16 dst_port; + u8 flags; + u8 reserved; +} __packed; + +/** + * enum iwl_sap_ipv4_filter_flags - used in &struct iwl_sap_ipv4_filter + * @SAP_IPV4_FILTER_ICMP_PASS: Pass ICMP packets to CSME. + * @SAP_IPV4_FILTER_ICMP_COPY: Pass ICMP packets to host. + * @SAP_IPV4_FILTER_ARP_REQ_PASS: Pass ARP requests to CSME. + * @SAP_IPV4_FILTER_ARP_REQ_COPY: Pass ARP requests to host. + * @SAP_IPV4_FILTER_ARP_RESP_PASS: Pass ARP responses to CSME. + * @SAP_IPV4_FILTER_ARP_RESP_COPY: Pass ARP responses to host. + */ +enum iwl_sap_ipv4_filter_flags { + SAP_IPV4_FILTER_ICMP_PASS = BIT(0), + SAP_IPV4_FILTER_ICMP_COPY = BIT(1), + SAP_IPV4_FILTER_ARP_REQ_PASS = BIT(2), + SAP_IPV4_FILTER_ARP_REQ_COPY = BIT(3), + SAP_IPV4_FILTER_ARP_RESP_PASS = BIT(4), + SAP_IPV4_FILTER_ARP_RESP_COPY = BIT(5), +}; + +/** + * struct iwl_sap_ipv4_filter - + * @ipv4_addr: The IP address to filter. + * @flags: See &enum iwl_sap_ipv4_filter_flags. + */ +struct iwl_sap_ipv4_filter { + __be32 ipv4_addr; + __le32 flags; +} __packed; + +/** + * enum iwl_sap_ipv6_filter_flags - + * @SAP_IPV6_ADDR_FILTER_COPY: Pass packets to the host. + * @SAP_IPV6_ADDR_FILTER_ENABLED: If false, the filter should be ignored. + */ +enum iwl_sap_ipv6_filter_flags { + SAP_IPV6_ADDR_FILTER_COPY = BIT(0), + SAP_IPV6_ADDR_FILTER_ENABLED = BIT(1), +}; + +/** + * struct iwl_sap_ipv6_filter - + * @addr_lo24: Lowest 24 bits of the IPv6 address. + * @flags: See &enum iwl_sap_ipv6_filter_flags. + */ +struct iwl_sap_ipv6_filter { + u8 addr_lo24[3]; + u8 flags; +} __packed; + +/** + * enum iwl_sap_icmpv6_filter_flags - + * @SAP_ICMPV6_FILTER_ENABLED: If false, the filter should be ignored. + * @SAP_ICMPV6_FILTER_COPY: Pass packets to the host. + */ +enum iwl_sap_icmpv6_filter_flags { + SAP_ICMPV6_FILTER_ENABLED = BIT(0), + SAP_ICMPV6_FILTER_COPY = BIT(1), +}; + +/** + * enum iwl_sap_vlan_filter_flags - + * @SAP_VLAN_FILTER_VLAN_ID_MSK: TBD + * @SAP_VLAN_FILTER_ENABLED: If false, the filter should be ignored. + */ +enum iwl_sap_vlan_filter_flags { + SAP_VLAN_FILTER_VLAN_ID_MSK = 0x0FFF, + SAP_VLAN_FILTER_ENABLED = BIT(15), +}; + +/** + * struct iwl_sap_oob_filters - Out of band filters (for RX only) + * @flex_filters: Array of &struct iwl_sap_flex_filter. + * @icmpv6_flags: See &enum iwl_sap_icmpv6_filter_flags. + * @ipv6_filters: Array of &struct iwl_sap_ipv6_filter. + * @eth_filters: Array of &struct iwl_sap_eth_filter. + * @reserved: For alignment. + * @ipv4_filter: &struct iwl_sap_ipv4_filter. + * @vlan: See &enum iwl_sap_vlan_filter_flags. + */ +struct iwl_sap_oob_filters { + struct iwl_sap_flex_filter flex_filters[14]; + __le32 icmpv6_flags; + struct iwl_sap_ipv6_filter ipv6_filters[4]; + struct iwl_sap_eth_filter eth_filters[5]; + u8 reserved; + struct iwl_sap_ipv4_filter ipv4_filter; + __le16 vlan[4]; +} __packed; + +/** + * struct iwl_sap_csme_filters - payload of %SAP_MSG_NOTIF_CSME_FILTERS + * @hdr: The SAP header. + * @mode: Not used. + * @mac_address: Not used. + * @reserved: For alignment. + * @cbfilters: Not used. + * @filters: Out of band filters. + */ +struct iwl_sap_csme_filters { + struct iwl_sap_hdr hdr; + __le32 mode; + u8 mac_address[6]; + __le16 reserved; + u8 cbfilters[1728]; + struct iwl_sap_oob_filters filters; +} __packed; + +#define CB_TX_DHCP_FILT_IDX 30 +/** + * struct iwl_sap_cb_data - header to be added for transmitted packets. + * @hdr: The SAP header. + * @reserved: Not used. + * @to_me_filt_status: The filter that matches.
Bit %CB_TX_DHCP_FILT_IDX should + * be set for DHCP (the only packet that uses this header). + * @reserved2: Not used. + * @data_len: The length of the payload. + * @payload: The payload of the transmitted packet. + */ +struct iwl_sap_cb_data { + struct iwl_sap_hdr hdr; + __le32 reserved[7]; + __le32 to_me_filt_status; + __le32 reserved2; + __le32 data_len; + u8 payload[]; +}; + +#endif /* __sap_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h b/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h new file mode 100644 index 000000000000..83639c6225ca --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2021 Intel Corporation + */ + +#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + +#define trace_iwlmei_sap_data(...) + +#else + +#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA) || defined(TRACE_HEADER_MULTI_READ) + +#ifndef __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA +enum iwl_sap_data_trace_type { + IWL_SAP_RX_DATA_TO_AIR, + IWL_SAP_TX_DATA_FROM_AIR, + IWL_SAP_RX_DATA_DROPPED_FROM_AIR, + IWL_SAP_TX_DHCP, +}; + +static inline size_t +iwlmei_sap_data_offset(enum iwl_sap_data_trace_type trace_type) +{ + switch (trace_type) { + case IWL_SAP_RX_DATA_TO_AIR: + return 0; + case IWL_SAP_TX_DATA_FROM_AIR: + case IWL_SAP_RX_DATA_DROPPED_FROM_AIR: + return sizeof(struct iwl_sap_hdr); + case IWL_SAP_TX_DHCP: + return sizeof(struct iwl_sap_cb_data); + default: + WARN_ON_ONCE(1); + } + + return 0; +} +#endif + +#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA + +#include <linux/tracepoint.h> +#include <linux/skbuff.h> +#include "sap.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlmei_sap_data + +TRACE_EVENT(iwlmei_sap_data, + TP_PROTO(const struct sk_buff *skb, + enum iwl_sap_data_trace_type trace_type), + TP_ARGS(skb, trace_type), + TP_STRUCT__entry( + __dynamic_array(u8, data, + skb->len - iwlmei_sap_data_offset(trace_type)) + __field(u32, trace_type) + ), + TP_fast_assign( + size_t offset = iwlmei_sap_data_offset(trace_type); + __entry->trace_type = trace_type; + skb_copy_bits(skb, offset, __get_dynamic_array(data), + skb->len - offset); + ), + TP_printk("sap_data:trace_type %d len %d", + __entry->trace_type, __get_dynamic_array_len(data)) +); + +/* + * If you add something here, add a stub in case + * !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + */ + +#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace-data +#include <trace/define_trace.h> + +#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace.c b/drivers/net/wireless/intel/iwlwifi/mei/trace.c new file mode 100644 index 000000000000..47ac32ef9f69 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/trace.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#include <linux/module.h> + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ + +#define CREATE_TRACE_POINTS +#include "trace.h" +#include "trace-data.h" + +#endif /* __CHECKER__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace.h b/drivers/net/wireless/intel/iwlwifi/mei/trace.h new file mode 100644 index 000000000000..45ecb22ec84a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/trace.h @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2021 Intel Corporation + */ + +#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + +#define trace_iwlmei_sap_cmd(...) +#define trace_iwlmei_me_msg(...) + +#else + +#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlmei_sap_cmd + +#include "mei/sap.h" + +TRACE_EVENT(iwlmei_sap_cmd, + TP_PROTO(const struct iwl_sap_hdr *sap_cmd, bool tx), + TP_ARGS(sap_cmd, tx), + TP_STRUCT__entry( + __dynamic_array(u8, cmd, + le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd)) + __field(u8, tx) + __field(u16, type) + __field(u16, len) + __field(u32, seq) + ), + TP_fast_assign( + memcpy(__get_dynamic_array(cmd), sap_cmd, + le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd)); + __entry->tx = tx; + __entry->type = le16_to_cpu(sap_cmd->type); + __entry->len = le16_to_cpu(sap_cmd->len); + __entry->seq = le32_to_cpu(sap_cmd->seq_num); + ), + TP_printk("sap_cmd %s: type %d len %d seq %d", __entry->tx ? "Tx" : "Rx", + __entry->type, __entry->len, __entry->seq) +); + +TRACE_EVENT(iwlmei_me_msg, + TP_PROTO(const struct iwl_sap_me_msg_hdr *hdr, bool tx), + TP_ARGS(hdr, tx), + TP_STRUCT__entry( + __field(u8, type) + __field(u8, tx) + __field(u32, seq_num) + ), + TP_fast_assign( + __entry->type = le32_to_cpu(hdr->type); + __entry->seq_num = le32_to_cpu(hdr->seq_num); + __entry->tx = tx; + ), + TP_printk("ME message: %s: type %d seq %d", __entry->tx ? "Tx" : "Rx", + __entry->type, __entry->seq_num) +); + +/* + * If you add something here, add a stub in case + * !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + */ + +#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> + +#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 75fc2d935e5d..11e814b7cad0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -10,5 +10,6 @@ iwlmvm-y += rfi.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwlmvm-$(CONFIG_PM) += d3.o +iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 863fec150e53..6ce78c03e51f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -757,6 +757,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) if (ret) return ret; + iwl_mei_set_power_limit(per_chain); + IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } @@ -1401,7 +1403,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm); - WARN_ON(!mvm->nvm_data); ret = iwl_run_init_mvm_ucode(mvm); if (ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 9fb9c7dad314..ede9fb1ebf2e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -16,6 +16,7 @@ #include <net/ieee80211_radiotap.h> #include <net/tcp.h> +#include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" @@ -190,6 +191,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", PTR_ERR_OR_ZERO(resp)); + resp = NULL; goto out; } @@ -211,7 +213,6 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, __le16_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; - kfree(resp); if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", PTR_ERR_OR_ZERO(regd)); @@ -223,7 +224,10 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, mvm->lar_regdom_set = true; mvm->mcc_src = src_id; + iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); + out: + kfree(resp); return regd; } @@ -717,6 +721,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); + iwl_mvm_vendor_cmds_register(mvm); + hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); @@ -1083,6 +1089,27 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); + ret = iwl_mvm_mei_get_ownership(mvm); + if (ret) + return ret; + + if (mvm->mei_nvm_data) { + /* We got the NIC, we can now free the MEI NVM data */ + kfree(mvm->mei_nvm_data); + mvm->mei_nvm_data = NULL; + + /* + * We can't free the nvm_data 
we allocated based on the SAP + * data because we registered to cfg80211 with the channels + * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data + * just in order to be able to free it later. + * NULLify nvm_data so that we will read the NVM from the + * firmware this time. + */ + mvm->temp_nvm_data = mvm->nvm_data; + mvm->nvm_data = NULL; + } + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART @@ -1117,11 +1144,34 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + int retry, max_retry = 0; mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_start(mvm); + + /* we are starting the mac not in error flow, and restart is enabled */ + if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && + iwlwifi_mod_params.fw_restart) { + max_retry = IWL_MAX_INIT_RETRY; + /* + * This will prevent mac80211 recovery flows from triggering + * during init failures + */ + set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + } + + for (retry = 0; retry <= max_retry; retry++) { + ret = __iwl_mvm_mac_start(mvm); + if (!ret) + break; + + IWL_ERR(mvm, "mac start retry %d\n", retry); + } + clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + + mutex_unlock(&mvm->mutex); + + iwl_mvm_mei_set_sw_rfkill_state(mvm); + + return ret; } @@ -1239,6 +1289,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) */ flush_work(&mvm->roc_done_wk); + iwl_mvm_mei_set_sw_rfkill_state(mvm); + mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); @@ -1509,6 +1561,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvm->monitor_on = true; iwl_mvm_vif_dbgfs_register(mvm, vif); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + !mvm->csme_vif && mvm->mei_registered) { + iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); + iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); + mvm->csme_vif = vif; + } + goto out_unlock; out_unbind: @@ -1561,6 +1622,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + if (vif == mvm->csme_vif) { + iwl_mei_set_netdev(NULL); + mvm->csme_vif = NULL; + } + probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); @@ -2371,6 +2437,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IEEE80211_SMPS_DYNAMIC); } } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { + iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden.
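The iwl_mvm_mac_start() hunk above only arms the retry budget when this is not already a restart flow and fw_restart is enabled, and it holds IWL_MVM_STATUS_STARTING across the attempts so that iwl_mvm_nic_restart() (see the ops.c hunk further down) skips the usual recovery path for these intermediate failures. As a hedged illustration, the control flow reduces to the following standalone C model; MAX_INIT_RETRY, do_start and failures_left are invented stand-ins for the driver internals, and the failure budget is simulated:

	#include <stdio.h>
	#include <stdbool.h>

	#define MAX_INIT_RETRY 2	/* stand-in for IWL_MAX_INIT_RETRY */

	static bool starting;		/* stand-in for the STARTING status bit */
	static int failures_left = 2;	/* pretend the first two attempts fail */

	static int do_start(void)
	{
		return failures_left-- > 0 ? -1 : 0;
	}

	int main(void)
	{
		int ret = -1;
		int retry;

		starting = true;	/* suppress external recovery while retrying */
		for (retry = 0; retry <= MAX_INIT_RETRY; retry++) {
			ret = do_start();
			if (!ret)
				break;
			fprintf(stderr, "start retry %d\n", retry);
		}
		starting = false;	/* recovery flows are allowed again */

		printf("started: %s\n", ret ? "no" : "yes");
		return ret ? 1 : 0;
	}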
@@ -3107,6 +3174,69 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, } } +static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_sta *mvm_sta) +{ +#if IS_ENABLED(CONFIG_IWLMEI) + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mei_conn_info conn_info = { + .ssid_len = vif->bss_conf.ssid_len, + .channel = vif->bss_conf.chandef.chan->hw_value, + }; + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + if (!mvm->mei_registered) + return; + + switch (mvm_sta->pairwise_cipher) { + case WLAN_CIPHER_SUITE_CCMP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; + break; + case 0: + /* open profile */ + break; + default: + /* cipher not supported, don't send anything to iwlmei */ + return; + } + + switch (mvmvif->rekey_data.akm) { + case WLAN_AKM_SUITE_SAE & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; + break; + case WLAN_AKM_SUITE_PSK & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; + break; + case WLAN_AKM_SUITE_8021X & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; + break; + case 0: + /* open profile */ + conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; + break; + default: + /* auth method / AKM not supported */ + /* TODO: All the FT versions of these? */ + return; + } + + memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); + memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); + + /* TODO: add support for collocated AP data */ + iwl_mei_host_associated(&conn_info, NULL); +#endif +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -3251,6 +3381,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * multicast data frames can be forwarded to the driver */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, @@ -3460,6 +3591,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, int ret, i; u8 key_offset; + mvmsta = iwl_mvm_sta_from_mac80211(sta); + switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->trans_cfg->gen2) { @@ -3568,7 +3701,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, struct ieee80211_key_seq seq; int tid, q; - mvmsta = iwl_mvm_sta_from_mac80211(sta); WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, mvm->trans->num_rx_queues), @@ -3595,6 +3727,9 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, else key_offset = STA_KEY_IDX_INVALID; + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + mvmsta->pairwise_cipher = key->cipher; + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { @@ -3640,7 +3775,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { - mvmsta = iwl_mvm_sta_from_mac80211(sta); ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], lockdep_is_held(&mvm->mutex)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2b1dcd60e00f..8cd0ed096010 100644 ---
a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -30,6 +30,7 @@ #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" +#include "mei/iwl-mei.h" #include "iwl-nvm-parse.h" #include <linux/average.h> @@ -830,6 +831,18 @@ struct iwl_mvm { const char *nvm_file_name; struct iwl_nvm_data *nvm_data; + struct iwl_mei_nvm *mei_nvm_data; + struct iwl_mvm_csme_conn_info __rcu *csme_conn_info; + bool mei_rfkill_blocked; + bool mei_registered; + struct work_struct sap_connected_wk; + + /* + * NVM built based on the SAP data, which we can't free even after + * we get ownership because it contains the channels registered + * with cfg80211. + */ + struct iwl_nvm_data *temp_nvm_data; + /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; @@ -1021,6 +1034,8 @@ struct iwl_mvm { /* Indicate if 32Khz external clock is valid */ u32 ext_clock_valid; + /* This vif is used by CSME to send / receive traffic */ + struct ieee80211_vif *csme_vif; struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; u8 csa_tx_block_bcn_timeout; @@ -1123,6 +1138,8 @@ struct iwl_mvm { * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) + * @IWL_MVM_STATUS_STARTING: starting the MAC; used to disable the + * restart flow while in the STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, @@ -1134,6 +1151,12 @@ enum iwl_mvm_status { IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, + IWL_MVM_STATUS_STARTING, +}; + +struct iwl_mvm_csme_conn_info { + struct rcu_head rcu_head; + struct iwl_mei_conn_info conn_info; }; /* Keep track of completed init configuration */ @@ -1939,6 +1962,17 @@ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); +#if IS_ENABLED(CONFIG_IWLMEI) + +/* vendor commands */ +void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm); + +#else + +static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {} + +#endif + /* Location Aware Regulatory */ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, @@ -2158,4 +2192,47 @@ enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher) return IWL_LOCATION_CIPHER_INVALID; } } + +struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm); +static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + return iwl_mei_get_ownership(); + return 0; +} + +static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm, + struct sk_buff *skb, + unsigned int ivlen) +{ + if (mvm->mei_registered) + iwl_mei_tx_copy_to_csme(skb, ivlen); +} + +static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + iwl_mei_host_disassociated(); +} + +static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + iwl_mei_device_down(); +} + +static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) +{ + bool sw_rfkill = + mvm->hw_registered ?
rfkill_blocked(mvm->hw->wiphy->rfkill) : false; + + if (mvm->mei_registered) + iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), + sw_rfkill); +} + +void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool forbidden); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 232ad531d612..0cd92b3915f1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -683,13 +683,43 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) { + struct iwl_trans *trans = mvm->trans; int ret; + if (trans->csme_own) { + if (WARN(!mvm->mei_registered, + "csme is owner, but we aren't registered to iwlmei\n")) + goto get_nvm_from_fw; + + mvm->mei_nvm_data = iwl_mei_get_nvm(); + if (mvm->mei_nvm_data) { + /* + * mvm->mei_nvm_data is set and because of that, + * we'll load the NVM from the FW when we get + * ownership. + */ + mvm->nvm_data = + iwl_parse_mei_nvm_data(trans, trans->cfg, + mvm->mei_nvm_data, mvm->fw); + return 0; + } + + IWL_ERR(mvm, + "Got a NULL NVM from CSME, trying to get it from the device\n"); + } + +get_nvm_from_fw: rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); mutex_lock(&mvm->mutex); - ret = iwl_run_init_mvm_ucode(mvm); + ret = iwl_trans_start_hw(mvm->trans); + if (ret) { + mutex_unlock(&mvm->mutex); + wiphy_unlock(mvm->hw->wiphy); + rtnl_unlock(); + return ret; + } + ret = iwl_run_init_mvm_ucode(mvm); if (ret && ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!ret && iwl_mvm_is_lar_supported(mvm)) { @@ -701,9 +731,10 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); + wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); - if (ret < 0) + if (ret) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); return ret; @@ -711,6 +742,7 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) { + struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; int ret; iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); @@ -718,10 +750,17 @@ ret = iwl_mvm_mac_setup_register(mvm); if (ret) return ret; + mvm->hw_registered = true; iwl_mvm_dbgfs_register(mvm); + wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, + mvm->mei_rfkill_blocked, + RFKILL_HARD_BLOCK_NOT_OWNER); + + iwl_mvm_mei_set_sw_rfkill_state(mvm); + return 0; } @@ -902,6 +941,109 @@ static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = { .frob_mem = iwl_mvm_frob_mem, }; +static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info) +{ + struct iwl_mvm *mvm = priv; + struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info; + + /* + * This is protected by the guarantee that this function will not be + * called twice on two different threads + */ + prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); + + curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL); + if (!curr_conn_info) + return; + + curr_conn_info->conn_info = *conn_info; + + rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); + + if (prev_conn_info) + kfree_rcu(prev_conn_info, rcu_head); +} + +static void iwl_mvm_mei_rfkill(void *priv, bool blocked) +{ + struct iwl_mvm *mvm = priv; + + mvm->mei_rfkill_blocked = blocked; + if (!mvm->hw_registered) + return; + + wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, +
mvm->mei_rfkill_blocked, + RFKILL_HARD_BLOCK_NOT_OWNER); +} + +static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden) +{ + struct iwl_mvm *mvm = priv; + + if (!mvm->hw_registered || !mvm->csme_vif) + return; + + iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); +} + +static void iwl_mvm_sap_connected_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, sap_connected_wk); + int ret; + + ret = iwl_mvm_start_get_nvm(mvm); + if (ret) + goto out_free; + + ret = iwl_mvm_start_post_nvm(mvm); + if (ret) + goto out_free; + + return; + +out_free: + IWL_ERR(mvm, "Couldn't get started...\n"); + iwl_mei_start_unregister(); + iwl_mei_unregister_complete(); + iwl_fw_flush_dumps(&mvm->fwrt); + iwl_mvm_thermal_exit(mvm); + iwl_fw_runtime_free(&mvm->fwrt); + iwl_phy_db_free(mvm->phy_db); + kfree(mvm->scan_cmd); + iwl_trans_op_mode_leave(mvm->trans); + kfree(mvm->nvm_data); + kfree(mvm->mei_nvm_data); + + ieee80211_free_hw(mvm->hw); +} + +static void iwl_mvm_mei_sap_connected(void *priv) +{ + struct iwl_mvm *mvm = priv; + + if (!mvm->hw_registered) + schedule_work(&mvm->sap_connected_wk); +} + +static void iwl_mvm_mei_nic_stolen(void *priv) +{ + struct iwl_mvm *mvm = priv; + + rtnl_lock(); + cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); + rtnl_unlock(); +} + +static const struct iwl_mei_ops mei_ops = { + .me_conn_status = iwl_mvm_me_conn_status, + .rfkill = iwl_mvm_mei_rfkill, + .roaming_forbidden = iwl_mvm_mei_roaming_forbidden, + .sap_connected = iwl_mvm_mei_sap_connected, + .nic_stolen = iwl_mvm_mei_nic_stolen, +}; + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -913,9 +1055,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, static const u8 no_reclaim_cmds[] = { TX_CMD, }; - int err, scan_size; + int scan_size; u32 min_backoff; enum iwl_amsdu_size rb_size_default; + struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; /* * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station @@ -1015,6 +1158,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); + INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); @@ -1137,10 +1281,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file\n"); - err = iwl_trans_start_hw(mvm->trans); - if (err) - goto out_free; - scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); @@ -1165,8 +1305,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->debugfs_dir = dbgfs_dir; - if (iwl_mvm_start_get_nvm(mvm)) - goto out_thermal_exit; + mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); + + /* + * If getting the NVM failed but we are registered to MEI, we'll get + * the NVM later, when it becomes possible to get it from CSME.
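 *
 * For illustration only (not driver code): the deferred bring-up that
 * makes the early return safe is iwl_mvm_sap_connected_wk() above,
 * which re-runs the same two steps once CSME connects,
 *
 *	ret = iwl_mvm_start_get_nvm(mvm);
 *	if (!ret)
 *		ret = iwl_mvm_start_post_nvm(mvm);
 *
 * and tears the op mode down if they fail again.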
+ */ + if (iwl_mvm_start_get_nvm(mvm) && mvm->mei_registered) + return op_mode; if (iwl_mvm_start_post_nvm(mvm)) goto out_thermal_exit; @@ -1175,6 +1321,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, out_thermal_exit: iwl_mvm_thermal_exit(mvm); + if (mvm->mei_registered) { + iwl_mei_start_unregister(); + iwl_mei_unregister_complete(); + } out_free: iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); @@ -1201,6 +1351,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm) iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); + iwl_mvm_mei_device_down(mvm); } static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) @@ -1208,11 +1359,33 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; + if (mvm->mei_registered) { + rtnl_lock(); + iwl_mei_set_netdev(NULL); + rtnl_unlock(); + iwl_mei_start_unregister(); + } + + /* + * After we unregister from mei, the worker can't be scheduled + * anymore. + */ + cancel_work_sync(&mvm->sap_connected_wk); + iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); - ieee80211_unregister_hw(mvm->hw); + /* + * If we couldn't get ownership on the device and we couldn't + * get the NVM from CSME, we haven't registered to mac80211. + * In that case, we didn't fail op_mode_start, because we are + * waiting for CSME to allow us to get the NVM to register to + * mac80211. If that didn't happen, we haven't registered to + * mac80211, hence the if below. + */ + if (mvm->hw_registered) + ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); kfree(mvm->mcast_filter_cmd); @@ -1227,6 +1400,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) mvm->phy_db = NULL; kfree(mvm->nvm_data); + kfree(mvm->mei_nvm_data); + kfree(rcu_access_pointer(mvm->csme_conn_info)); + kfree(mvm->temp_nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); @@ -1235,6 +1411,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); + if (mvm->mei_registered) + iwl_mei_unregister_complete(); + ieee80211_free_hw(mvm->hw); } @@ -1517,6 +1696,12 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) iwl_mvm_set_rfkill_state(mvm); } +struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm) +{ + return rcu_dereference_protected(mvm->csme_conn_info, + lockdep_is_held(&mvm->mutex)); +} + static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -1600,6 +1785,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_fw_error_collect(&mvm->fwrt, false); + } else if (test_bit(IWL_MVM_STATUS_STARTING, + &mvm->status)) { + IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 32b4d1935788..e34b82b2a288 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * 
Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -373,6 +373,7 @@ struct iwl_mvm_rxq_dup_data { * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). + * @pairwise_cipher: used to feed iwlmei upon authorization * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -415,6 +416,7 @@ struct iwl_mvm_sta { u8 sleep_tx_count; u8 avg_energy; u8 tx_ant; + u32 pairwise_cipher; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index bdd4ee432548..1883d98abb2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1128,6 +1128,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); + if (ieee80211_is_data(fc)) + iwl_mvm_mei_tx_copy_to_csme(mvm, skb, + info->control.hw_key ? + info->control.hw_key->iv_len : 0); + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c new file mode 100644 index 000000000000..f702ad85e609 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ +#include "mvm.h" +#include <linux/nl80211-vnd-intel.h> + +static const struct nla_policy +iwl_mvm_vendor_attr_policy[NUM_IWL_MVM_VENDOR_ATTR] = { + [IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_AUTH_MODE] = { .type = NLA_U32 }, + [IWL_MVM_VENDOR_ATTR_CHANNEL_NUM] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_SSID] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_SSID_LEN }, + [IWL_MVM_VENDOR_ATTR_BAND] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_COLLOC_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN }, +}; + +static int iwl_mvm_vendor_get_csme_conn_info(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_csme_conn_info *csme_conn_info; + struct sk_buff *skb; + int err = 0; + + mutex_lock(&mvm->mutex); + csme_conn_info = iwl_mvm_get_csme_conn_info(mvm); + + if (!csme_conn_info) { + err = -EINVAL; + goto out_unlock; + } + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 200); + if (!skb) { + err = -ENOMEM; + goto out_unlock; + } + + if (nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_AUTH_MODE, + csme_conn_info->conn_info.auth_mode) || + nla_put(skb, IWL_MVM_VENDOR_ATTR_SSID, + csme_conn_info->conn_info.ssid_len, + csme_conn_info->conn_info.ssid) || + nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_STA_CIPHER, + csme_conn_info->conn_info.pairwise_cipher) || + nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_CHANNEL_NUM, + csme_conn_info->conn_info.channel) || + 
nla_put(skb, IWL_MVM_VENDOR_ATTR_ADDR, ETH_ALEN, + csme_conn_info->conn_info.bssid)) { + kfree_skb(skb); + err = -ENOBUFS; + } + +out_unlock: + mutex_unlock(&mvm->mutex); + if (err) + return err; + + return cfg80211_vendor_cmd_reply(skb); +} + +static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + iwl_mvm_mei_get_ownership(mvm); + mutex_unlock(&mvm->mutex); + + return 0; +} + +static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = { + { + .info = { + .vendor_id = INTEL_OUI, + .subcmd = IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO, + }, + .doit = iwl_mvm_vendor_get_csme_conn_info, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV, + .policy = iwl_mvm_vendor_attr_policy, + .maxattr = MAX_IWL_MVM_VENDOR_ATTR, + }, + { + .info = { + .vendor_id = INTEL_OUI, + .subcmd = IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP, + }, + .doit = iwl_mvm_vendor_host_get_ownership, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV, + .policy = iwl_mvm_vendor_attr_policy, + .maxattr = MAX_IWL_MVM_VENDOR_ATTR, + }, +}; + +enum iwl_mvm_vendor_events_idx { + /* 0x0 - 0x3 are deprecated */ + IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4, + NUM_IWL_MVM_VENDOR_EVENT_IDX +}; + +static const struct nl80211_vendor_cmd_info +iwl_mvm_vendor_events[NUM_IWL_MVM_VENDOR_EVENT_IDX] = { + [IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN] = { + .vendor_id = INTEL_OUI, + .subcmd = IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT, + }, +}; + +void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) +{ + mvm->hw->wiphy->vendor_commands = iwl_mvm_vendor_commands; + mvm->hw->wiphy->n_vendor_commands = ARRAY_SIZE(iwl_mvm_vendor_commands); + mvm->hw->wiphy->vendor_events = iwl_mvm_vendor_events; + mvm->hw->wiphy->n_vendor_events = ARRAY_SIZE(iwl_mvm_vendor_events); +} + +void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool forbidden) +{ + struct sk_buff *msg; + + if (WARN_ON(!vif)) + return; + + msg = cfg80211_vendor_event_alloc(mvm->hw->wiphy, + ieee80211_vif_to_wdev(vif), + 200, IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN, + GFP_ATOMIC); + if (!msg) + return; + + if (nla_put(msg, IWL_MVM_VENDOR_ATTR_VIF_ADDR, + ETH_ALEN, vif->addr) || + nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN, forbidden)) + goto nla_put_failure; + + cfg80211_vendor_event(msg, GFP_ATOMIC); + return; + + nla_put_failure: + kfree_skb(msg); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index c574f041f096..c2d1e899477c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1339,9 +1339,13 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores) { + int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; - for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) { + if (!num_devices) + return NULL; + + for (i = num_devices - 1; i >= 0; i--) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; if (dev_info->device != (u16)IWL_CFG_ANY && @@ -1422,15 +1426,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * first trying to load the firmware etc. and potentially only * detecting any problems when the first interface is brought up.
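 *
 * A hedged sketch of that fail-fast shape (probe_hw is a stand-in
 * name, not a driver symbol):
 *
 *	ret = probe_hw(trans);
 *	if (ret)
 *		goto out_free_trans;
 *
 * failing here at probe time keeps a dead device from being registered
 * and the problem from surfacing only when an interface is brought up.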
*/ - ret = iwl_finish_nic_init(iwl_trans); - if (ret) - goto out_free_trans; - if (iwl_trans_grab_nic_access(iwl_trans)) { - /* all good */ - iwl_trans_release_nic_access(iwl_trans); - } else { - ret = -EIO; - goto out_free_trans; + ret = iwl_pcie_prepare_card_hw(iwl_trans); + if (!ret) { + ret = iwl_finish_nic_init(iwl_trans); + if (ret) + goto out_free_trans; + if (iwl_trans_grab_nic_access(iwl_trans)) { + /* all good */ + iwl_trans_release_nic_access(iwl_trans); + } else { + ret = -EIO; + goto out_free_trans; + } } iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); @@ -1442,8 +1449,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (iwl_trans->trans_cfg->rf_id && iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && - !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { + ret = -EINVAL; goto out_free_trans; + } dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_REV_TYPE(iwl_trans->hw_rev), @@ -1563,6 +1572,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_trans; pci_set_drvdata(pdev, iwl_trans); + + /* try to get ownership so that we'll know if we don't own it */ + iwl_pcie_prepare_card_hw(iwl_trans); + iwl_trans->drv = iwl_drv_start(iwl_trans); if (IS_ERR(iwl_trans->drv)) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 1efb53f78a62..451b28de1392 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -24,6 +24,7 @@ #include "fw/error-dump.h" #include "fw/dbg.h" #include "fw/api/tx.h" +#include "mei/iwl-mei.h" #include "internal.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" @@ -594,8 +595,10 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) ret = iwl_pcie_set_hw_ready(trans); /* If the card is ready, exit 0 */ - if (ret >= 0) + if (ret >= 0) { + trans->csme_own = false; return 0; + } iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -608,8 +611,22 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) + if (ret >= 0) { + trans->csme_own = false; return 0; + } + + if (iwl_mei_is_connected()) { + IWL_DEBUG_INFO(trans, + "Couldn't prepare the card but SAP is connected\n"); + trans->csme_own = true; + if (trans->trans_cfg->device_family != + IWL_DEVICE_FAMILY_9000) + IWL_ERR(trans, + "SAP not supported for this NIC family\n"); + + return -EBUSY; + } usleep_range(200, 1000); t += 200; diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index e459e7192ae9..b74f4cb5d6d3 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -1815,8 +1815,9 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev) memset(&txdesc, 0, sizeof(txdesc)); /* skb->data starts with txdesc->frame_control */ - hdr_len = 24; - skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len); + hdr_len = sizeof(txdesc.header); + BUILD_BUG_ON(hdr_len != 24); + skb_copy_from_linear_data(skb, &txdesc.header, hdr_len); if (ieee80211_is_data(txdesc.frame_control) && ieee80211_has_a4(txdesc.frame_control) && skb->len >= 30) { diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h 
b/drivers/net/wireless/intersil/hostap/hostap_wlan.h index dd2603d9b5d3..c25cd21d18bd 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h +++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h @@ -115,12 +115,14 @@ struct hfa384x_tx_frame { __le16 tx_control; /* HFA384X_TX_CTRL_ flags */ /* 802.11 */ - __le16 frame_control; /* parts not used */ - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; /* filled by firmware */ - u8 addr3[ETH_ALEN]; - __le16 seq_ctrl; /* filled by firmware */ + struct_group(header, + __le16 frame_control; /* parts not used */ + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; /* filled by firmware */ + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; /* filled by firmware */ + ); u8 addr4[ETH_ALEN]; __le16 data_len; diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h index dfa22468b14a..af96bdba3b2b 100644 --- a/drivers/net/wireless/marvell/libertas/host.h +++ b/drivers/net/wireless/marvell/libertas/host.h @@ -308,10 +308,12 @@ struct txpd { __le32 tx_packet_location; /* Tx packet length */ __le16 tx_packet_length; - /* First 2 byte of destination MAC address */ - u8 tx_dest_addr_high[2]; - /* Last 4 byte of destination MAC address */ - u8 tx_dest_addr_low[4]; + struct_group(tx_dest_addr, + /* First 2 byte of destination MAC address */ + u8 tx_dest_addr_high[2]; + /* Last 4 byte of destination MAC address */ + u8 tx_dest_addr_low[4]; + ); /* Pkt Priority */ u8 priority; /* Pkt Transmit Power control */ diff --git a/drivers/net/wireless/marvell/libertas/tx.c b/drivers/net/wireless/marvell/libertas/tx.c index aeb481740df6..27304a98787d 100644 --- a/drivers/net/wireless/marvell/libertas/tx.c +++ b/drivers/net/wireless/marvell/libertas/tx.c @@ -113,6 +113,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) p802x_hdr = skb->data; pkt_len = skb->len; + BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN); if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; @@ -124,10 +125,10 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) pkt_len -= sizeof(*rtap_hdr); /* copy destination address from 802.11 header */ - memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); + memcpy(&txpd->tx_dest_addr, p802x_hdr + 4, ETH_ALEN); } else { /* copy destination address from 802.3 header */ - memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); + memcpy(&txpd->tx_dest_addr, p802x_hdr, ETH_ALEN); } txpd->tx_packet_length = cpu_to_le16(pkt_len); diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h index 5d726545d987..b2af2ddb6bc4 100644 --- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h +++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h @@ -268,10 +268,12 @@ struct txpd { __le32 tx_packet_location; /* Tx packet length */ __le16 tx_packet_length; - /* First 2 byte of destination MAC address */ - u8 tx_dest_addr_high[2]; - /* Last 4 byte of destination MAC address */ - u8 tx_dest_addr_low[4]; + struct_group(tx_dest_addr, + /* First 2 byte of destination MAC address */ + u8 tx_dest_addr_high[2]; + /* Last 4 byte of destination MAC address */ + u8 tx_dest_addr_low[4]; + ); /* Pkt Priority */ u8 priority; /* Pkt Transmit Power control */ diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 71492211904b..02a1e1f547d8 100644 ---
a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -232,7 +232,8 @@ static void lbtf_tx_work(struct work_struct *work) ieee80211_get_tx_rate(priv->hw, info)->hw_value); /* copy destination address from 802.11 header */ - memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4, + BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN); + memcpy(&txpd->tx_dest_addr, skb->data + sizeof(struct txpd) + 4, ETH_ALEN); txpd->tx_packet_length = cpu_to_le16(len); txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 2ff23ab259ab..63c25c69ed2b 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -2071,9 +2071,11 @@ struct mwifiex_ie_types_robust_coex { __le32 mode; } __packed; +#define MWIFIEX_VERSION_STR_LENGTH 128 + struct host_cmd_ds_version_ext { u8 version_str_sel; - char version_str[128]; + char version_str[MWIFIEX_VERSION_STR_LENGTH]; } __packed; struct host_cmd_ds_mgmt_frame_reg { diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 19b996c6a260..ace7371c4773 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -226,6 +226,23 @@ exit_rx_proc: return 0; } +static void maybe_quirk_fw_disable_ds(struct mwifiex_adapter *adapter) +{ + struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + struct mwifiex_ver_ext ver_ext; + + if (test_and_set_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &adapter->work_flags)) + return; + + memset(&ver_ext, 0, sizeof(ver_ext)); + ver_ext.version_str_sel = 1; + if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT, + HostCmd_ACT_GEN_GET, 0, &ver_ext, false)) { + mwifiex_dbg(priv->adapter, MSG, + "Checking hardware revision failed.\n"); + } +} + /* * The main process. 
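 *
 * (Illustrative aside: when init completes, the loop below now also
 * calls the maybe_quirk_fw_disable_ds() helper added above; its
 * one-shot guard is the usual test_and_set_bit shape,
 *
 *	if (test_and_set_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT,
 *			     &adapter->work_flags))
 *		return;
 *
 * so only the first caller issues the HostCmd_CMD_VERSION_EXT query.)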
* @@ -356,6 +373,7 @@ process_start: if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) { adapter->hw_status = MWIFIEX_HW_STATUS_READY; mwifiex_init_fw_complete(adapter); + maybe_quirk_fw_disable_ds(adapter); } } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 90012cbcfd15..332dd1c8db35 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -524,6 +524,7 @@ enum mwifiex_adapter_work_flags { MWIFIEX_IS_SUSPENDED, MWIFIEX_IS_HS_CONFIGURED, MWIFIEX_IS_HS_ENABLING, + MWIFIEX_IS_REQUESTING_FW_VEREXT, }; struct mwifiex_band_config { @@ -646,7 +647,7 @@ struct mwifiex_private { struct wireless_dev wdev; struct mwifiex_chan_freq_power cfp; u32 versionstrsel; - char version_str[128]; + char version_str[MWIFIEX_VERSION_STR_LENGTH]; #ifdef CONFIG_DEBUG_FS struct dentry *dfs_dev_dir; #endif @@ -1055,6 +1056,8 @@ struct mwifiex_adapter { void *devdump_data; int devdump_len; struct timer_list devdump_timer; + + bool ignore_btcoex_events; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index c3f5583ea70d..d5fb29400bad 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -3152,6 +3152,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) if (ret) goto err_alloc_buffers; + if (pdev->device == PCIE_DEVICE_ID_MARVELL_88W8897) + adapter->ignore_btcoex_events = true; + return 0; err_alloc_buffers: diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 6b5d35d9e69f..1a4ae8a42a31 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -708,11 +708,35 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv, { struct host_cmd_ds_version_ext *ver_ext = &resp->params.verext; + if (test_and_clear_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &priv->adapter->work_flags)) { + if (strncmp(ver_ext->version_str, "ChipRev:20, BB:9b(10.00), RF:40(21)", + MWIFIEX_VERSION_STR_LENGTH) == 0) { + struct mwifiex_ds_auto_ds auto_ds = { + .auto_ds = DEEP_SLEEP_OFF, + }; + + mwifiex_dbg(priv->adapter, MSG, + "Bad HW revision detected, disabling deep sleep\n"); + + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH, + DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, false)) { + mwifiex_dbg(priv->adapter, MSG, + "Disabling deep sleep failed.\n"); + } + } + + return 0; + } + if (version_ext) { version_ext->version_str_sel = ver_ext->version_str_sel; memcpy(version_ext->version_str, ver_ext->version_str, - sizeof(char) * 128); - memcpy(priv->version_str, ver_ext->version_str, 128); + MWIFIEX_VERSION_STR_LENGTH); + memcpy(priv->version_str, ver_ext->version_str, + MWIFIEX_VERSION_STR_LENGTH); + + /* Ensure the version string from the firmware is 0-terminated */ + priv->version_str[MWIFIEX_VERSION_STR_LENGTH - 1] = '\0'; } return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 68c63268e2e6..80e5d44bad9d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -1058,6 +1058,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_BT_COEX_WLAN_PARA_CHANGE: dev_dbg(adapter->dev, "EVENT: BT coex wlan param update\n"); + if 
(adapter->ignore_btcoex_events) + break; + mwifiex_bt_coex_wlan_param_update_event(priv, adapter->event_skb); break; diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 9736aa0ab7fd..8f01fcbe9396 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -130,7 +130,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, default: mwifiex_dbg(adapter, ERROR, "unknown recv_type %#x\n", recv_type); - return -1; + ret = -1; + goto exit_restore_skb; } break; case MWIFIEX_USB_EP_DATA: diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 529e325498cd..864a2ba9efee 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -4225,9 +4225,11 @@ struct mwl8k_cmd_set_key { __le32 key_info; __le32 key_id; __le16 key_len; - __u8 key_material[MAX_ENCR_KEY_LENGTH]; - __u8 tkip_tx_mic_key[MIC_KEY_LENGTH]; - __u8 tkip_rx_mic_key[MIC_KEY_LENGTH]; + struct { + __u8 key_material[MAX_ENCR_KEY_LENGTH]; + __u8 tkip_tx_mic_key[MIC_KEY_LENGTH]; + __u8 tkip_rx_mic_key[MIC_KEY_LENGTH]; + } tkip; __le16 tkip_rsc_low; __le32 tkip_rsc_high; __le16 tkip_tsc_low; @@ -4375,7 +4377,7 @@ static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw, goto done; } - memcpy(cmd->key_material, key->key, keymlen); + memcpy(&cmd->tkip, key->key, keymlen); cmd->action = cpu_to_le32(action); rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index b8bcf22a07fd..027d9ea17d04 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -74,13 +74,12 @@ EXPORT_SYMBOL_GPL(mt76_queues_read); static int mt76_rx_queues_read(struct seq_file *s, void *data) { struct mt76_dev *dev = dev_get_drvdata(s->private); - int i, queued; + int i; seq_puts(s, " queue | hw-queued | head | tail |\n"); mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; - queued = mt76_is_usb(dev) ? 
q->ndesc - q->queued : q->queued; seq_printf(s, " %9d | %9d | %9d | %9d |\n", i, q->queued, q->head, q->tail); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 5ee52cd70a4b..d1806f198aed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (!wcid) wcid = &dev->mt76.global_wcid; - pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); - if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) { struct mt7615_phy *phy = &dev->phy; @@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (id < 0) return id; + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta, pid, key, false); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index bd2939ebcbf4..5a6d7829c6e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map); static void mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; - __le32 *txwi; - int pid; - - if (!wcid) - wcid = &dev->mt76.global_wcid; - - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); - txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); memset(txwi, 0, MT_USB_TXD_SIZE); mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); skb_push(skb, MT_USB_TXD_SIZE); @@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); struct sk_buff *skb = tx_info->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; struct mt7615_sta *msta; - int pad; + int pad, err, pktid; msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL; + if (!wcid) + wcid = &dev->mt76.global_wcid; + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta && !msta->rate_probe) { /* request to configure sampling rate */ @@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, spin_unlock_bh(&dev->mt76.lock); } - mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb); + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); if (mt76_is_usb(mdev)) { u32 len = skb->len; @@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, pad = round_up(skb->len, 4) - skb->len; } - return mt76_skb_adjust_pad(skb, pad); + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. 
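 * mt76_tx_status_skb_add() stored the skb in the wcid->pktid idr for
 * status tracking; when the pad adjustment fails the frame is dropped
 * by the caller, so the slot has to be returned here or the stale
 * entry would sit there until the status timeout. A hedged sketch of
 * the alloc/unwind pair (the first/last bounds are illustrative):
 *
 *	id = idr_alloc(&wcid->pktid, skb, first, last, GFP_ATOMIC);
 *	...
 *	if (err)
 *		idr_remove(&wcid->pktid, id);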
*/ + idr_remove(&wcid->pktid, pktid); + + return err; } EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index efd70ddc2fd1..2c6c03809b20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU; enum mt76_qsel qsel; u32 flags; + int err; mt76_insert_hdr_pad(tx_info->skb); @@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, ewma_pktlen_add(&msta->pktlen, tx_info->skb->len); } - return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags); + err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags); + if (err && wcid) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pid); + + return err; } EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 5fcf35f2d9fb..809dc18e5083 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -1151,8 +1151,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } } - pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + id = mt76_token_consume(mdev, &t); + if (id < 0) + return id; + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, false); @@ -1178,13 +1184,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, txp->bss_idx = mvif->idx; } - t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); - t->skb = tx_info->skb; - - id = mt76_token_consume(mdev, &t); - if (id < 0) - return id; - txp->token = cpu_to_le16(id); if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) txp->rept_wds_wcid = cpu_to_le16(wcid->idx); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 899957b9d0f1..852d5d97c70b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta) if (ht_cap->ht_supported) mode |= PHY_MODE_GN; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_MODE_AX_24G; } else if (band == NL80211_BAND_5GHZ) { mode |= PHY_MODE_A; @@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta) if (vht_cap->vht_supported) mode |= PHY_MODE_AC; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_MODE_AX_5G; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c index 137f86a6dbf8..bdec508b6b9f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -142,15 +142,11 @@ out: static void mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid, enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; - __le32 *txwi; - int pid; + __le32 *txwi 
= (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); - txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); memset(txwi, 0, MT_SDIO_TXD_SIZE); mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false); skb_push(skb, MT_SDIO_TXD_SIZE); @@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, { struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; struct sk_buff *skb = tx_info->skb; - int pad; + int err, pad, pktid; if (unlikely(tx_info->skb->len <= ETH_HLEN)) return -EINVAL; @@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } } - mt7921s_write_txwi(dev, wcid, qid, sta, skb); + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb); mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA); pad = round_up(skb->len, 4) - skb->len; - return mt76_skb_adjust_pad(skb, pad); + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; } void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 11719ef034d8..6b8c9dc80542 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, if (!(cb->flags & MT_TX_CB_DMA_DONE)) continue; - if (!time_is_after_jiffies(cb->jiffies + + if (time_is_after_jiffies(cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT)) continue; } diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index ea81ef120fd1..82566544419a 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -626,7 +626,6 @@ void chip_wakeup(struct wilc *wilc) u32 clk_status_val = 0, trials = 0; u32 wakeup_reg, wakeup_bit; u32 clk_status_reg, clk_status_bit; - u32 to_host_from_fw_reg, to_host_from_fw_bit; u32 from_host_to_fw_reg, from_host_to_fw_bit; const struct wilc_hif_func *hif_func = wilc->hif_func; @@ -637,8 +636,6 @@ void chip_wakeup(struct wilc *wilc) clk_status_bit = WILC_SDIO_CLK_STATUS_BIT; from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT; - to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG; - to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT; } else { wakeup_reg = WILC_SPI_WAKEUP_REG; wakeup_bit = WILC_SPI_WAKEUP_BIT; @@ -646,8 +643,6 @@ void chip_wakeup(struct wilc *wilc) clk_status_bit = WILC_SPI_CLK_STATUS_BIT; from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT; - to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG; - to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT; } /* indicate host wakeup */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index e4473a551241..74c3d8cb3100 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status) if (status == -ENODEV || status == -ENOENT) return true; + if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) + return false; + if (status == 
-EPROTO || status == -ETIMEDOUT) rt2x00dev->num_proto_errs++; else diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 9b83c710c9b8..51fe51bb0504 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -899,7 +899,7 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) u8 place = chnl; if (chnl > 14) { - for (place = 14; place < sizeof(channel5g); place++) { + for (place = 14; place < ARRAY_SIZE(channel5g); place++) { if (channel5g[place] == chnl) { place++; break; @@ -1366,7 +1366,7 @@ u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl) u8 place; if (chnl > 14) { - for (place = 14; place < sizeof(channel_all); place++) { + for (place = 14; place < ARRAY_SIZE(channel_all); place++) { if (channel_all[place] == chnl) return place - 13; } @@ -2428,7 +2428,7 @@ static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel) int i; - for (i = 0; i < sizeof(channel5g); i++) + for (i = 0; i < ARRAY_SIZE(channel5g); i++) if (channel == channel5g[i]) return true; return false; @@ -2692,9 +2692,8 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw) u8 i; rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "settings regs %d default regs %d\n", - (int)(sizeof(rtlphy->iqk_matrix) / - sizeof(struct iqk_matrix_regs)), + "settings regs %zu default regs %d\n", + ARRAY_SIZE(rtlphy->iqk_matrix), IQK_MATRIX_REG_NUM); /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */ for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { @@ -2861,16 +2860,14 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw) case BAND_ON_5G: /* Get first channel error when change between * 5G and 2.4G band. */ - if (channel <= 14) + if (WARN_ONCE(channel <= 14, "rtl8192de: 5G but channel<=14\n")) return 0; - WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n"); break; case BAND_ON_2_4G: /* Get first channel error when change between * 5G and 2.4G band. 
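 *
 * (Same fold as the 5G case above: WARN_ONCE() evaluates to the
 * condition it tests, so
 *
 *	if (WARN_ONCE(cond, "message"))
 *		return 0;
 *
 * warns once and bails in one step, where the old sequence returned
 * before its WARN_ONCE could ever fire.)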
*/ - if (channel > 14) + if (WARN_ONCE(channel > 14, "rtl8192de: 2G but channel>14\n")) return 0; - WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n"); break; default: WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index aa07856411b1..31f9e9e5c680 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -108,7 +108,6 @@ #define CHANNEL_GROUP_IDX_5GM 6 #define CHANNEL_GROUP_IDX_5GH 9 #define CHANNEL_GROUP_MAX_5G 9 -#define CHANNEL_MAX_NUMBER_2G 14 #define AVG_THERMAL_NUM 8 #define AVG_THERMAL_NUM_88E 4 #define AVG_THERMAL_NUM_8723BE 4 diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 682b23502e6e..37cf1439a8a0 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -904,6 +904,39 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v) return 0; } +static ssize_t rtw_debugfs_set_force_lowest_basic_rate(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + bool input; + int err; + + err = kstrtobool_from_user(buffer, count, &input); + if (err) + return err; + + if (input) + set_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + else + clear_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + + return count; +} + +static int rtw_debugfs_get_force_lowest_basic_rate(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + + seq_printf(m, "force lowest basic rate: %d\n", + test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags)); + + return 0; +} + static ssize_t rtw_debugfs_set_dm_cap(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) @@ -1094,6 +1127,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { .cb_read = rtw_debugfs_get_fw_crash, }; +static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = { + .cb_write = rtw_debugfs_set_force_lowest_basic_rate, + .cb_read = rtw_debugfs_get_force_lowest_basic_rate, +}; + static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { .cb_write = rtw_debugfs_set_dm_cap, .cb_read = rtw_debugfs_get_dm_cap, @@ -1174,6 +1212,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_r(tx_pwr_tbl); rtw_debugfs_add_rw(edcca_enable); rtw_debugfs_add_rw(fw_crash); + rtw_debugfs_add_rw(force_lowest_basic_rate); rtw_debugfs_add_rw(dm_cap); } diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index bbdd535b64e7..c37f1d508882 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -364,6 +364,7 @@ enum rtw_flags { RTW_FLAG_WOWLAN, RTW_FLAG_RESTARTING, RTW_FLAG_RESTART_TRIGGERING, + RTW_FLAG_FORCE_LOWEST_RATE, NUM_OF_RTW_FLAGS, }; diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index a7a6ebfaa203..3b367c9085eb 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1738,6 +1738,15 @@ static const struct dmi_system_id rtw88_pci_quirks[] = { }, .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), }, + { + .callback = disable_pci_caps, + .ident = "HP HP 250 G7 
Notebook PC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"), + }, + .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), + }, {} }; diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index 3a101aa139ed..f9332fa463ca 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -233,17 +233,34 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src) spin_unlock_irqrestore(&tx_report->q_lock, flags); } +static u8 rtw_get_mgmt_rate(struct rtw_dev *rtwdev, struct sk_buff *skb, + u8 lowest_rate, bool ignore_rate) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = tx_info->control.vif; + bool force_lowest = test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + + if (!vif || !vif->bss_conf.basic_rates || ignore_rate || force_lowest) + return lowest_rate; + + return __ffs(vif->bss_conf.basic_rates) + lowest_rate; +} + static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, - struct sk_buff *skb) + struct sk_buff *skb, + bool ignore_rate) { if (rtwdev->hal.current_band_type == RTW_BAND_2G) { pkt_info->rate_id = RTW_RATEID_B_20M; - pkt_info->rate = DESC_RATE1M; + pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE1M, + ignore_rate); } else { pkt_info->rate_id = RTW_RATEID_G; - pkt_info->rate = DESC_RATE6M; + pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE6M, + ignore_rate); } + pkt_info->use_rate = true; pkt_info->dis_rate_fallback = true; } @@ -280,7 +297,7 @@ static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct sk_buff *skb) { - rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, false); pkt_info->dis_qselseq = true; pkt_info->en_hwseq = true; pkt_info->hw_ssn_sel = 0; @@ -404,7 +421,7 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, if (type != RSVD_BEACON && type != RSVD_DUMMY) pkt_info->qsel = TX_DESC_QSEL_MGMT; - rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, true); bmc = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index c2885e4dd882..3729abda04f9 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -411,12 +411,13 @@ enum rtw89_regulation_type { RTW89_NA = 4, RTW89_IC = 5, RTW89_KCC = 6, - RTW89_NCC = 7, - RTW89_CHILE = 8, - RTW89_ACMA = 9, - RTW89_MEXICO = 10, + RTW89_ACMA = 7, + RTW89_NCC = 8, + RTW89_MEXICO = 9, + RTW89_CHILE = 10, RTW89_UKRAINE = 11, RTW89_CN = 12, + RTW89_QATAR = 13, RTW89_REGD_NUM, }; diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index 29eb188c888c..1e85808aaf4b 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -723,6 +723,7 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp, } static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = { + [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR, @@ -735,6 +736,10 @@ static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = { 
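[Note on the rtw88/tx.c hunk above: mac80211's `bss_conf.basic_rates` is a bitmap of rate indices relative to the band's first rate, so `__ffs()` (index of the lowest set bit) added to the band's lowest descriptor rate yields the slowest configured basic rate; a NULL vif, an empty bitmap, the rsvd-page path, or the new debugfs force flag all fall back to the old hard-coded minimum. A minimal user-space sketch of that selection, with made-up rate values and the 1-based ffs() standing in for the kernel's 0-based __ffs():

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	/* Illustrative descriptor-rate values mirroring DESC_RATE1M.. */
	enum { DESC_RATE1M = 0, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };

	/*
	 * Bit N of basic_rates means "the Nth rate above the band's lowest
	 * rate is basic", so the lowest set bit selects the slowest mandatory
	 * rate. No basic rates, or the force flag, -> band minimum.
	 */
	static int mgmt_rate(unsigned int basic_rates, int lowest_rate,
			     int force_lowest)
	{
		if (!basic_rates || force_lowest)
			return lowest_rate;
		return (ffs(basic_rates) - 1) + lowest_rate; /* ffs() is 1-based */
	}

	int main(void)
	{
		printf("%d\n", mgmt_rate(0xf, DESC_RATE1M, 0)); /* 1M basic -> 0 */
		printf("%d\n", mgmt_rate(0x4, DESC_RATE1M, 0)); /* 5.5M lowest -> 2 */
		printf("%d\n", mgmt_rate(0x0, DESC_RATE1M, 1)); /* forced -> 0 */
		return 0;
	}

The remainder of the debug.c hunk continues below.]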
[RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR, [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR, [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR, + [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR, + [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR, + [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR, + [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR, }; static void rtw89_debug_dump_mac_mem(struct seq_file *m, @@ -814,7 +819,7 @@ rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp, return -EINVAL; } - enable = set == 0 ? false : true; + enable = set != 0; switch (sel) { case 0: debugfs_priv->dbgpkg_en.ss_dbg = enable; diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 212aaf577d3c..65ef3dc9d061 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, info->section_num = GET_FW_HDR_SEC_NUM(fw); info->hdr_len = RTW89_FW_HDR_SIZE + info->section_num * RTW89_FW_SECTION_HDR_SIZE; - SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN); bin = fw + info->hdr_len; @@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l } skb_put_data(skb, fw, len); + SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FWDL, H2C_FUNC_MAC_FWHDR_DL, len); diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 7ee0d9323310..36e8d0da6c1e 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -282,8 +282,10 @@ struct rtw89_h2creg_sch_tx_en { le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8)) #define GET_FW_HDR_CMD_VERSERION(fwhdr) \ le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24)) -#define SET_FW_HDR_PART_SIZE(fwhdr, val) \ - le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0)) +static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val) +{ + le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0)); +} #define SET_CTRL_INFO_MACID(table, val) \ le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)) diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index afcd07ab1de7..f8389e849c67 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1093,7 +1093,6 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) static int dmac_func_en(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret = 0; val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN | @@ -1107,7 +1106,7 @@ static int dmac_func_en(struct rtw89_dev *rtwdev) B_AX_WD_RLS_CLK_EN); rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32); - return ret; + return 0; } static int chip_func_en(struct rtw89_dev *rtwdev) @@ -3695,7 +3694,7 @@ void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) { struct rtw89_traffic_stats *stats = &rtwdev->stats; struct rtw89_vif *rtwvif; - bool en = stats->tx_tfc_lv > stats->rx_tfc_lv ? 
false : true; + bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv; bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); if (en == old) diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 6f3db8a2a9c2..94cd29bd83d7 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -227,6 +227,7 @@ enum rtw89_mac_dbg_port_sel { /* SRAM mem dump */ #define R_AX_INDIR_ACCESS_ENTRY 0x40000 +#define AXIDMA_BASE_ADDR 0x18006000 #define STA_SCHED_BASE_ADDR 0x18808000 #define RXPLD_FLTR_CAM_BASE_ADDR 0x18813000 #define SECURITY_CAM_BASE_ADDR 0x18814000 @@ -240,10 +241,15 @@ enum rtw89_mac_dbg_port_sel { #define DMAC_TBL_BASE_ADDR 0x18800000 #define SHCUT_MACHDR_BASE_ADDR 0x18800800 #define BCN_IE_CAM1_BASE_ADDR 0x188A0000 +#define TXD_FIFO_0_BASE_ADDR 0x18856200 +#define TXD_FIFO_1_BASE_ADDR 0x188A1080 +#define TXDATA_FIFO_0_BASE_ADDR 0x18856000 +#define TXDATA_FIFO_1_BASE_ADDR 0x188A1000 #define CCTL_INFO_SIZE 32 enum rtw89_mac_mem_sel { + RTW89_MAC_MEM_AXIDMA, RTW89_MAC_MEM_SHARED_BUF, RTW89_MAC_MEM_DMAC_TBL, RTW89_MAC_MEM_SHCUT_MACHDR, @@ -256,6 +262,10 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_BA_CAM, RTW89_MAC_MEM_BCN_IE_CAM0, RTW89_MAC_MEM_BCN_IE_CAM1, + RTW89_MAC_MEM_TXD_FIFO_0, + RTW89_MAC_MEM_TXD_FIFO_1, + RTW89_MAC_MEM_TXDATA_FIFO_0, + RTW89_MAC_MEM_TXDATA_FIFO_1, /* keep last */ RTW89_MAC_MEM_LAST, diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index ab134856baac..312d9a07599d 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -654,6 +654,12 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev, u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE; u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE; + if (page >= RTW89_H2C_RF_PAGE_NUM) { + rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d", + rf_path, info->curr_idx); + return; + } + info->rtw89_phy_config_rf_h2c[page][idx] = cpu_to_le32((reg->addr << 20) | reg->data); info->curr_idx++; @@ -662,30 +668,29 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev, static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev, struct rtw89_fw_h2c_rf_reg_info *info) { - u16 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE; - u16 len = (info->curr_idx % RTW89_H2C_RF_PAGE_SIZE) * 4; + u16 remain = info->curr_idx; + u16 len = 0; u8 i; int ret = 0; - if (page > RTW89_H2C_RF_PAGE_NUM) { + if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) { rtw89_warn(rtwdev, - "rf reg h2c total page num %d larger than %d (RTW89_H2C_RF_PAGE_NUM)\n", - page, RTW89_H2C_RF_PAGE_NUM); - return -EINVAL; + "rf reg h2c total len %d larger than %d\n", + remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE); + ret = -EINVAL; + goto out; } - for (i = 0; i < page; i++) { - ret = rtw89_fw_h2c_rf_reg(rtwdev, info, - RTW89_H2C_RF_PAGE_SIZE * 4, i); + for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) { + len = remain > RTW89_H2C_RF_PAGE_SIZE ? 
RTW89_H2C_RF_PAGE_SIZE : remain; + ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i); if (ret) - return ret; + goto out; } - ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len, i); - if (ret) - return ret; +out: info->curr_idx = 0; - return 0; + return ret; } static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev, @@ -1099,9 +1104,15 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, switch (band) { case RTW89_BAND_2G: lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf][regd][ch_idx]; + if (!lmt) + lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf] + [RTW89_WW][ch_idx]; break; case RTW89_BAND_5G: lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf][regd][ch_idx]; + if (!lmt) + lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf] + [RTW89_WW][ch_idx]; break; default: rtw89_warn(rtwdev, "unknown band type: %d\n", band); @@ -1224,9 +1235,15 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, switch (band) { case RTW89_BAND_2G: lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx][regd][ch_idx]; + if (!lmt_ru) + lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx] + [RTW89_WW][ch_idx]; break; case RTW89_BAND_5G: lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx][regd][ch_idx]; + if (!lmt_ru) + lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx] + [RTW89_WW][ch_idx]; break; default: rtw89_warn(rtwdev, "unknown band type: %d\n", band); @@ -1767,7 +1784,7 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) } rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo); cfo->cfo_avg_pre = new_cfo; - x_cap_update = cfo->crystal_cap == pre_x_cap ? false : true; + x_cap_update = cfo->crystal_cap != pre_x_cap; rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update); rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n", cfo->def_x_cap, pre_x_cap, cfo->crystal_cap, diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index f00b94ecfff4..4c37e590e43c 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -15,243 +15,244 @@ static const struct rtw89_regulatory rtw89_ww_regd = COUNTRY_REGD("00", RTW89_WW, RTW89_WW); static const struct rtw89_regulatory rtw89_regd_map[] = { - COUNTRY_REGD("AR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BO", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO), + COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CL", RTW89_WW, RTW89_CHILE), + COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE), COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SV", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HN", RTW89_WW, RTW89_FCC), - COUNTRY_REGD("MX", RTW89_FCC, RTW89_MEXICO), + COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO), COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UY", RTW89_WW, RTW89_FCC), - COUNTRY_REGD("VE", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CY", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CZ", RTW89_WW, 
RTW89_ETSI), - COUNTRY_REGD("DK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("EE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("DE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("HU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LV", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MC", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ES", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GB", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("HR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("EG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IQ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("JO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LB", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("OM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("QA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ME", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ZA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("UA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("YE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ZW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("HK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IE", RTW89_ETSI, 
RTW89_ETSI), + COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GB", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR), + COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE), + COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CN", RTW89_CN, RTW89_CN), + COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC), - COUNTRY_REGD("MY", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("VN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AU", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("NZ", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("PG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("VN", 
RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("CA", RTW89_IC, RTW89_IC), COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK), - COUNTRY_REGD("JM", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AQ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BY", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BV", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CV", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CX", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("CC", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GQ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ER", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ET", 
RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GL", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GP", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GY", RTW89_FCC, RTW89_NCC), + COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC), COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HM", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("VA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("JE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LY", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MV", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ML", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MQ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("YT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NP", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NC", RTW89_WW, RTW89_ETSI), 
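[The regd_map rows in this run all change the same way: the RTW89_WW placeholder is replaced by each country's actual 2.4/5 GHz domain, which only works because the phy.c hunks earlier make the per-channel power-limit lookup fall back to the worldwide column when the selected domain encodes no value. A compact sketch of that two-step lookup, with invented table contents and sizes; only the fallback pattern itself is taken from the diff:

	#include <stdio.h>

	enum regd { WW = 0, FCC, ETSI, REGD_NUM };
	#define CH_NUM 3

	/*
	 * 0 means "no limit encoded for this domain/channel"; the reader then
	 * falls back to the WW column, mirroring the
	 * "if (!lmt) lmt = ...[RTW89_WW][ch_idx];" hunks in phy.c.
	 */
	static const signed char lmt[REGD_NUM][CH_NUM] = {
		[WW]   = { 40, 40, 40 },
		[FCC]  = { 56,  0, 48 },	/* channel 1 unset for FCC */
		[ETSI] = { 52, 52,  0 },	/* channel 2 unset for ETSI */
	};

	static signed char read_lmt(enum regd rd, int ch)
	{
		signed char v = lmt[rd][ch];
		return v ? v : lmt[WW][ch];
	}

	int main(void)
	{
		printf("FCC ch1 -> %d (WW fallback)\n", read_lmt(FCC, 1));
		printf("ETSI ch0 -> %d\n", read_lmt(ETSI, 0));
		return 0;
	}

The regd.c table changes continue below.]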
- COUNTRY_REGD("NE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NU", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("NF", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA), COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("RE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SB", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GS", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SJ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TJ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TK", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("TO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TC", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("UG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("VU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("WF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("EH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ZM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI), }; static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c 
b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 5c6ffca3a324..9e25e53f6c4a 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -1053,10 +1053,10 @@ static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev, struct rtw89_channel_params *param, enum rtw89_phy_idx phy_idx) { - bool cck_en = param->center_chan > 14 ? false : true; + bool cck_en = param->center_chan <= 14; u8 pri_ch_idx = param->pri_ch_idx; - if (param->center_chan <= 14) + if (cck_en) rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan, param->primary_chan, param->bandwidth); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c index 3a4fe7207420..253b5f8fc4f9 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c @@ -43384,5248 +43384,6991 @@ static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][0][0] = 56, - [0][0][0][0][0][1] = 56, - [0][0][0][0][0][2] = 56, - [0][0][0][0][0][3] = 56, - [0][0][0][0][0][4] = 56, - [0][0][0][0][0][5] = 56, - [0][0][0][0][0][6] = 56, - [0][0][0][0][0][7] = 56, - [0][0][0][0][0][8] = 56, - [0][0][0][0][0][9] = 56, - [0][0][0][0][0][10] = 56, - [0][0][0][0][0][11] = 56, - [0][0][0][0][0][12] = 48, - [0][0][0][0][0][13] = 76, - [0][1][0][0][0][0] = 44, - [0][1][0][0][0][1] = 44, - [0][1][0][0][0][2] = 44, - [0][1][0][0][0][3] = 44, - [0][1][0][0][0][4] = 44, - [0][1][0][0][0][5] = 44, - [0][1][0][0][0][6] = 44, - [0][1][0][0][0][7] = 44, - [0][1][0][0][0][8] = 44, - [0][1][0][0][0][9] = 44, - [0][1][0][0][0][10] = 44, - [0][1][0][0][0][11] = 44, - [0][1][0][0][0][12] = 38, - [0][1][0][0][0][13] = 64, - [1][0][0][0][0][0] = 0, - [1][0][0][0][0][1] = 0, - [1][0][0][0][0][2] = 58, - [1][0][0][0][0][3] = 58, - [1][0][0][0][0][4] = 58, - [1][0][0][0][0][5] = 58, - [1][0][0][0][0][6] = 46, - [1][0][0][0][0][7] = 46, - [1][0][0][0][0][8] = 46, - [1][0][0][0][0][9] = 32, - [1][0][0][0][0][10] = 32, - [1][0][0][0][0][11] = 0, - [1][0][0][0][0][12] = 0, - [1][0][0][0][0][13] = 0, - [1][1][0][0][0][0] = 0, - [1][1][0][0][0][1] = 0, - [1][1][0][0][0][2] = 46, - [1][1][0][0][0][3] = 46, - [1][1][0][0][0][4] = 46, - [1][1][0][0][0][5] = 46, - [1][1][0][0][0][6] = 46, - [1][1][0][0][0][7] = 46, - [1][1][0][0][0][8] = 46, - [1][1][0][0][0][9] = 24, - [1][1][0][0][0][10] = 24, - [1][1][0][0][0][11] = 0, - [1][1][0][0][0][12] = 0, - [1][1][0][0][0][13] = 0, - [0][0][1][0][0][0] = 58, - [0][0][1][0][0][1] = 58, - [0][0][1][0][0][2] = 58, - [0][0][1][0][0][3] = 58, - [0][0][1][0][0][4] = 58, - [0][0][1][0][0][5] = 58, - [0][0][1][0][0][6] = 58, - [0][0][1][0][0][7] = 58, - [0][0][1][0][0][8] = 58, - [0][0][1][0][0][9] = 58, - [0][0][1][0][0][10] = 58, - [0][0][1][0][0][11] = 56, - [0][0][1][0][0][12] = 52, - [0][0][1][0][0][13] = 0, - [0][1][1][0][0][0] = 46, - [0][1][1][0][0][1] = 46, - [0][1][1][0][0][2] = 46, - [0][1][1][0][0][3] = 46, - [0][1][1][0][0][4] = 46, - [0][1][1][0][0][5] = 46, - [0][1][1][0][0][6] = 46, - [0][1][1][0][0][7] = 46, - [0][1][1][0][0][8] = 46, - [0][1][1][0][0][9] = 46, - [0][1][1][0][0][10] = 46, - [0][1][1][0][0][11] = 42, - [0][1][1][0][0][12] = 40, - [0][1][1][0][0][13] = 0, - [0][0][2][0][0][0] = 58, - [0][0][2][0][0][1] = 58, - [0][0][2][0][0][2] = 58, - [0][0][2][0][0][3] = 58, - [0][0][2][0][0][4] = 58, - [0][0][2][0][0][5] = 
58, - [0][0][2][0][0][6] = 58, - [0][0][2][0][0][7] = 58, - [0][0][2][0][0][8] = 58, - [0][0][2][0][0][9] = 58, - [0][0][2][0][0][10] = 58, - [0][0][2][0][0][11] = 54, - [0][0][2][0][0][12] = 50, - [0][0][2][0][0][13] = 0, - [0][1][2][0][0][0] = 46, - [0][1][2][0][0][1] = 46, - [0][1][2][0][0][2] = 46, - [0][1][2][0][0][3] = 46, - [0][1][2][0][0][4] = 46, - [0][1][2][0][0][5] = 46, - [0][1][2][0][0][6] = 46, - [0][1][2][0][0][7] = 46, - [0][1][2][0][0][8] = 46, - [0][1][2][0][0][9] = 46, - [0][1][2][0][0][10] = 46, - [0][1][2][0][0][11] = 42, - [0][1][2][0][0][12] = 40, - [0][1][2][0][0][13] = 0, - [0][1][2][1][0][0] = 34, - [0][1][2][1][0][1] = 34, - [0][1][2][1][0][2] = 34, - [0][1][2][1][0][3] = 34, - [0][1][2][1][0][4] = 34, - [0][1][2][1][0][5] = 34, - [0][1][2][1][0][6] = 34, - [0][1][2][1][0][7] = 34, - [0][1][2][1][0][8] = 34, - [0][1][2][1][0][9] = 34, - [0][1][2][1][0][10] = 34, - [0][1][2][1][0][11] = 34, - [0][1][2][1][0][12] = 34, - [0][1][2][1][0][13] = 0, - [1][0][2][0][0][0] = 0, - [1][0][2][0][0][1] = 0, - [1][0][2][0][0][2] = 56, - [1][0][2][0][0][3] = 56, - [1][0][2][0][0][4] = 58, - [1][0][2][0][0][5] = 58, - [1][0][2][0][0][6] = 54, - [1][0][2][0][0][7] = 50, - [1][0][2][0][0][8] = 50, - [1][0][2][0][0][9] = 42, - [1][0][2][0][0][10] = 40, - [1][0][2][0][0][11] = 0, - [1][0][2][0][0][12] = 0, - [1][0][2][0][0][13] = 0, - [1][1][2][0][0][0] = 0, - [1][1][2][0][0][1] = 0, - [1][1][2][0][0][2] = 46, - [1][1][2][0][0][3] = 46, - [1][1][2][0][0][4] = 46, - [1][1][2][0][0][5] = 46, - [1][1][2][0][0][6] = 46, - [1][1][2][0][0][7] = 46, - [1][1][2][0][0][8] = 46, - [1][1][2][0][0][9] = 38, - [1][1][2][0][0][10] = 36, - [1][1][2][0][0][11] = 0, - [1][1][2][0][0][12] = 0, - [1][1][2][0][0][13] = 0, - [1][1][2][1][0][0] = 0, - [1][1][2][1][0][1] = 0, - [1][1][2][1][0][2] = 34, - [1][1][2][1][0][3] = 34, - [1][1][2][1][0][4] = 34, - [1][1][2][1][0][5] = 34, - [1][1][2][1][0][6] = 34, - [1][1][2][1][0][7] = 34, - [1][1][2][1][0][8] = 34, - [1][1][2][1][0][9] = 34, - [1][1][2][1][0][10] = 34, - [1][1][2][1][0][11] = 0, - [1][1][2][1][0][12] = 0, - [1][1][2][1][0][13] = 0, - [0][0][0][0][2][0] = 76, - [0][0][0][0][1][0] = 56, - [0][0][0][0][3][0] = 68, - [0][0][0][0][5][0] = 76, - [0][0][0][0][6][0] = 56, - [0][0][0][0][9][0] = 56, - [0][0][0][0][8][0] = 60, - [0][0][0][0][11][0] = 56, - [0][0][0][0][2][1] = 76, - [0][0][0][0][1][1] = 56, - [0][0][0][0][3][1] = 68, - [0][0][0][0][5][1] = 76, - [0][0][0][0][6][1] = 56, - [0][0][0][0][9][1] = 56, - [0][0][0][0][8][1] = 60, - [0][0][0][0][11][1] = 56, - [0][0][0][0][2][2] = 76, - [0][0][0][0][1][2] = 56, - [0][0][0][0][3][2] = 68, - [0][0][0][0][5][2] = 76, - [0][0][0][0][6][2] = 56, - [0][0][0][0][9][2] = 56, - [0][0][0][0][8][2] = 60, - [0][0][0][0][11][2] = 56, - [0][0][0][0][2][3] = 76, - [0][0][0][0][1][3] = 56, - [0][0][0][0][3][3] = 68, - [0][0][0][0][5][3] = 76, - [0][0][0][0][6][3] = 56, - [0][0][0][0][9][3] = 56, - [0][0][0][0][8][3] = 60, - [0][0][0][0][11][3] = 56, - [0][0][0][0][2][4] = 76, - [0][0][0][0][1][4] = 56, - [0][0][0][0][3][4] = 68, - [0][0][0][0][5][4] = 76, - [0][0][0][0][6][4] = 56, - [0][0][0][0][9][4] = 56, - [0][0][0][0][8][4] = 60, - [0][0][0][0][11][4] = 56, - [0][0][0][0][2][5] = 76, - [0][0][0][0][1][5] = 56, - [0][0][0][0][3][5] = 68, - [0][0][0][0][5][5] = 76, - [0][0][0][0][6][5] = 56, - [0][0][0][0][9][5] = 56, - [0][0][0][0][8][5] = 60, - [0][0][0][0][11][5] = 56, - [0][0][0][0][2][6] = 76, - [0][0][0][0][1][6] = 56, - [0][0][0][0][3][6] = 68, - [0][0][0][0][5][6] = 76, - [0][0][0][0][6][6] = 56, - 
[0][0][0][0][9][6] = 56, - [0][0][0][0][8][6] = 60, - [0][0][0][0][11][6] = 56, - [0][0][0][0][2][7] = 76, - [0][0][0][0][1][7] = 56, - [0][0][0][0][3][7] = 68, - [0][0][0][0][5][7] = 76, - [0][0][0][0][6][7] = 56, - [0][0][0][0][9][7] = 56, - [0][0][0][0][8][7] = 60, - [0][0][0][0][11][7] = 56, - [0][0][0][0][2][8] = 76, - [0][0][0][0][1][8] = 56, - [0][0][0][0][3][8] = 68, - [0][0][0][0][5][8] = 76, - [0][0][0][0][6][8] = 56, - [0][0][0][0][9][8] = 56, - [0][0][0][0][8][8] = 60, - [0][0][0][0][11][8] = 56, - [0][0][0][0][2][9] = 76, - [0][0][0][0][1][9] = 56, - [0][0][0][0][3][9] = 68, - [0][0][0][0][5][9] = 76, - [0][0][0][0][6][9] = 56, - [0][0][0][0][9][9] = 56, - [0][0][0][0][8][9] = 60, - [0][0][0][0][11][9] = 56, - [0][0][0][0][2][10] = 76, - [0][0][0][0][1][10] = 56, - [0][0][0][0][3][10] = 68, - [0][0][0][0][5][10] = 76, - [0][0][0][0][6][10] = 56, - [0][0][0][0][9][10] = 56, - [0][0][0][0][8][10] = 60, - [0][0][0][0][11][10] = 56, - [0][0][0][0][2][11] = 68, - [0][0][0][0][1][11] = 56, - [0][0][0][0][3][11] = 68, - [0][0][0][0][5][11] = 68, - [0][0][0][0][6][11] = 56, - [0][0][0][0][9][11] = 56, - [0][0][0][0][8][11] = 60, - [0][0][0][0][11][11] = 56, - [0][0][0][0][2][12] = 48, - [0][0][0][0][1][12] = 56, - [0][0][0][0][3][12] = 68, - [0][0][0][0][5][12] = 48, - [0][0][0][0][6][12] = 56, - [0][0][0][0][9][12] = 56, - [0][0][0][0][8][12] = 60, - [0][0][0][0][11][12] = 56, - [0][0][0][0][2][13] = 127, - [0][0][0][0][1][13] = 127, - [0][0][0][0][3][13] = 76, - [0][0][0][0][5][13] = 127, - [0][0][0][0][6][13] = 127, - [0][0][0][0][9][13] = 127, - [0][0][0][0][8][13] = 127, - [0][0][0][0][11][13] = 127, - [0][1][0][0][2][0] = 74, - [0][1][0][0][1][0] = 44, - [0][1][0][0][3][0] = 56, - [0][1][0][0][5][0] = 74, - [0][1][0][0][6][0] = 44, - [0][1][0][0][9][0] = 44, - [0][1][0][0][8][0] = 48, - [0][1][0][0][11][0] = 44, - [0][1][0][0][2][1] = 76, - [0][1][0][0][1][1] = 44, - [0][1][0][0][3][1] = 56, - [0][1][0][0][5][1] = 76, - [0][1][0][0][6][1] = 44, - [0][1][0][0][9][1] = 44, - [0][1][0][0][8][1] = 48, - [0][1][0][0][11][1] = 44, - [0][1][0][0][2][2] = 76, - [0][1][0][0][1][2] = 44, - [0][1][0][0][3][2] = 56, - [0][1][0][0][5][2] = 76, - [0][1][0][0][6][2] = 44, - [0][1][0][0][9][2] = 44, - [0][1][0][0][8][2] = 48, - [0][1][0][0][11][2] = 44, - [0][1][0][0][2][3] = 76, - [0][1][0][0][1][3] = 44, - [0][1][0][0][3][3] = 56, - [0][1][0][0][5][3] = 76, - [0][1][0][0][6][3] = 44, - [0][1][0][0][9][3] = 44, - [0][1][0][0][8][3] = 48, - [0][1][0][0][11][3] = 44, - [0][1][0][0][2][4] = 76, - [0][1][0][0][1][4] = 44, - [0][1][0][0][3][4] = 56, - [0][1][0][0][5][4] = 76, - [0][1][0][0][6][4] = 44, - [0][1][0][0][9][4] = 44, - [0][1][0][0][8][4] = 48, - [0][1][0][0][11][4] = 44, - [0][1][0][0][2][5] = 76, - [0][1][0][0][1][5] = 44, - [0][1][0][0][3][5] = 56, - [0][1][0][0][5][5] = 76, - [0][1][0][0][6][5] = 44, - [0][1][0][0][9][5] = 44, - [0][1][0][0][8][5] = 48, - [0][1][0][0][11][5] = 44, - [0][1][0][0][2][6] = 76, - [0][1][0][0][1][6] = 44, - [0][1][0][0][3][6] = 56, - [0][1][0][0][5][6] = 76, - [0][1][0][0][6][6] = 44, - [0][1][0][0][9][6] = 44, - [0][1][0][0][8][6] = 48, - [0][1][0][0][11][6] = 44, - [0][1][0][0][2][7] = 76, - [0][1][0][0][1][7] = 44, - [0][1][0][0][3][7] = 56, - [0][1][0][0][5][7] = 76, - [0][1][0][0][6][7] = 44, - [0][1][0][0][9][7] = 44, - [0][1][0][0][8][7] = 48, - [0][1][0][0][11][7] = 44, - [0][1][0][0][2][8] = 76, - [0][1][0][0][1][8] = 44, - [0][1][0][0][3][8] = 56, - [0][1][0][0][5][8] = 76, - [0][1][0][0][6][8] = 44, - [0][1][0][0][9][8] = 44, - 
[0][1][0][0][8][8] = 48, - [0][1][0][0][11][8] = 44, - [0][1][0][0][2][9] = 76, - [0][1][0][0][1][9] = 44, - [0][1][0][0][3][9] = 56, - [0][1][0][0][5][9] = 76, - [0][1][0][0][6][9] = 44, - [0][1][0][0][9][9] = 44, - [0][1][0][0][8][9] = 48, - [0][1][0][0][11][9] = 44, - [0][1][0][0][2][10] = 62, - [0][1][0][0][1][10] = 44, - [0][1][0][0][3][10] = 56, - [0][1][0][0][5][10] = 62, - [0][1][0][0][6][10] = 44, - [0][1][0][0][9][10] = 44, - [0][1][0][0][8][10] = 48, - [0][1][0][0][11][10] = 44, - [0][1][0][0][2][11] = 52, - [0][1][0][0][1][11] = 44, - [0][1][0][0][3][11] = 56, - [0][1][0][0][5][11] = 52, - [0][1][0][0][6][11] = 44, - [0][1][0][0][9][11] = 44, - [0][1][0][0][8][11] = 48, - [0][1][0][0][11][11] = 44, - [0][1][0][0][2][12] = 38, - [0][1][0][0][1][12] = 44, - [0][1][0][0][3][12] = 56, - [0][1][0][0][5][12] = 38, - [0][1][0][0][6][12] = 44, - [0][1][0][0][9][12] = 44, - [0][1][0][0][8][12] = 48, - [0][1][0][0][11][12] = 44, - [0][1][0][0][2][13] = 127, - [0][1][0][0][1][13] = 127, - [0][1][0][0][3][13] = 64, - [0][1][0][0][5][13] = 127, - [0][1][0][0][6][13] = 127, - [0][1][0][0][9][13] = 127, - [0][1][0][0][8][13] = 127, - [0][1][0][0][11][13] = 127, - [1][0][0][0][2][0] = 127, - [1][0][0][0][1][0] = 127, - [1][0][0][0][3][0] = 127, - [1][0][0][0][5][0] = 127, - [1][0][0][0][6][0] = 127, - [1][0][0][0][9][0] = 127, - [1][0][0][0][8][0] = 127, - [1][0][0][0][11][0] = 127, - [1][0][0][0][2][1] = 127, - [1][0][0][0][1][1] = 127, - [1][0][0][0][3][1] = 127, - [1][0][0][0][5][1] = 127, - [1][0][0][0][6][1] = 127, - [1][0][0][0][9][1] = 127, - [1][0][0][0][8][1] = 127, - [1][0][0][0][11][1] = 127, - [1][0][0][0][2][2] = 60, - [1][0][0][0][1][2] = 58, - [1][0][0][0][3][2] = 68, - [1][0][0][0][5][2] = 60, - [1][0][0][0][6][2] = 58, - [1][0][0][0][9][2] = 58, - [1][0][0][0][8][2] = 60, - [1][0][0][0][11][2] = 58, - [1][0][0][0][2][3] = 60, - [1][0][0][0][1][3] = 58, - [1][0][0][0][3][3] = 68, - [1][0][0][0][5][3] = 60, - [1][0][0][0][6][3] = 58, - [1][0][0][0][9][3] = 58, - [1][0][0][0][8][3] = 60, - [1][0][0][0][11][3] = 58, - [1][0][0][0][2][4] = 60, - [1][0][0][0][1][4] = 58, - [1][0][0][0][3][4] = 68, - [1][0][0][0][5][4] = 60, - [1][0][0][0][6][4] = 58, - [1][0][0][0][9][4] = 58, - [1][0][0][0][8][4] = 60, - [1][0][0][0][11][4] = 58, - [1][0][0][0][2][5] = 60, - [1][0][0][0][1][5] = 58, - [1][0][0][0][3][5] = 68, - [1][0][0][0][5][5] = 60, - [1][0][0][0][6][5] = 58, - [1][0][0][0][9][5] = 58, - [1][0][0][0][8][5] = 60, - [1][0][0][0][11][5] = 58, - [1][0][0][0][2][6] = 46, - [1][0][0][0][1][6] = 58, - [1][0][0][0][3][6] = 68, - [1][0][0][0][5][6] = 46, - [1][0][0][0][6][6] = 58, - [1][0][0][0][9][6] = 58, - [1][0][0][0][8][6] = 60, - [1][0][0][0][11][6] = 58, - [1][0][0][0][2][7] = 46, - [1][0][0][0][1][7] = 58, - [1][0][0][0][3][7] = 68, - [1][0][0][0][5][7] = 46, - [1][0][0][0][6][7] = 58, - [1][0][0][0][9][7] = 58, - [1][0][0][0][8][7] = 60, - [1][0][0][0][11][7] = 58, - [1][0][0][0][2][8] = 46, - [1][0][0][0][1][8] = 58, - [1][0][0][0][3][8] = 68, - [1][0][0][0][5][8] = 46, - [1][0][0][0][6][8] = 58, - [1][0][0][0][9][8] = 58, - [1][0][0][0][8][8] = 60, - [1][0][0][0][11][8] = 58, - [1][0][0][0][2][9] = 32, - [1][0][0][0][1][9] = 58, - [1][0][0][0][3][9] = 68, - [1][0][0][0][5][9] = 32, - [1][0][0][0][6][9] = 58, - [1][0][0][0][9][9] = 58, - [1][0][0][0][8][9] = 60, - [1][0][0][0][11][9] = 58, - [1][0][0][0][2][10] = 32, - [1][0][0][0][1][10] = 58, - [1][0][0][0][3][10] = 68, - [1][0][0][0][5][10] = 32, - [1][0][0][0][6][10] = 58, - [1][0][0][0][9][10] = 58, - [1][0][0][0][8][10] = 
60, - [1][0][0][0][11][10] = 58, - [1][0][0][0][2][11] = 127, - [1][0][0][0][1][11] = 127, - [1][0][0][0][3][11] = 127, - [1][0][0][0][5][11] = 127, - [1][0][0][0][6][11] = 127, - [1][0][0][0][9][11] = 127, - [1][0][0][0][8][11] = 127, - [1][0][0][0][11][11] = 127, - [1][0][0][0][2][12] = 127, - [1][0][0][0][1][12] = 127, - [1][0][0][0][3][12] = 127, - [1][0][0][0][5][12] = 127, - [1][0][0][0][6][12] = 127, - [1][0][0][0][9][12] = 127, - [1][0][0][0][8][12] = 127, - [1][0][0][0][11][12] = 127, - [1][0][0][0][2][13] = 127, - [1][0][0][0][1][13] = 127, - [1][0][0][0][3][13] = 127, - [1][0][0][0][5][13] = 127, - [1][0][0][0][6][13] = 127, - [1][0][0][0][9][13] = 127, - [1][0][0][0][8][13] = 127, - [1][0][0][0][11][13] = 127, - [1][1][0][0][2][0] = 127, - [1][1][0][0][1][0] = 127, - [1][1][0][0][3][0] = 127, - [1][1][0][0][5][0] = 127, - [1][1][0][0][6][0] = 127, - [1][1][0][0][9][0] = 127, - [1][1][0][0][8][0] = 127, - [1][1][0][0][11][0] = 127, - [1][1][0][0][2][1] = 127, - [1][1][0][0][1][1] = 127, - [1][1][0][0][3][1] = 127, - [1][1][0][0][5][1] = 127, - [1][1][0][0][6][1] = 127, - [1][1][0][0][9][1] = 127, - [1][1][0][0][8][1] = 127, - [1][1][0][0][11][1] = 127, - [1][1][0][0][2][2] = 48, - [1][1][0][0][1][2] = 46, - [1][1][0][0][3][2] = 56, - [1][1][0][0][5][2] = 48, - [1][1][0][0][6][2] = 46, - [1][1][0][0][9][2] = 46, - [1][1][0][0][8][2] = 48, - [1][1][0][0][11][2] = 46, - [1][1][0][0][2][3] = 48, - [1][1][0][0][1][3] = 46, - [1][1][0][0][3][3] = 56, - [1][1][0][0][5][3] = 48, - [1][1][0][0][6][3] = 46, - [1][1][0][0][9][3] = 46, - [1][1][0][0][8][3] = 48, - [1][1][0][0][11][3] = 46, - [1][1][0][0][2][4] = 48, - [1][1][0][0][1][4] = 46, - [1][1][0][0][3][4] = 56, - [1][1][0][0][5][4] = 48, - [1][1][0][0][6][4] = 46, - [1][1][0][0][9][4] = 46, - [1][1][0][0][8][4] = 48, - [1][1][0][0][11][4] = 46, - [1][1][0][0][2][5] = 58, - [1][1][0][0][1][5] = 46, - [1][1][0][0][3][5] = 56, - [1][1][0][0][5][5] = 58, - [1][1][0][0][6][5] = 46, - [1][1][0][0][9][5] = 46, - [1][1][0][0][8][5] = 48, - [1][1][0][0][11][5] = 46, - [1][1][0][0][2][6] = 46, - [1][1][0][0][1][6] = 46, - [1][1][0][0][3][6] = 56, - [1][1][0][0][5][6] = 46, - [1][1][0][0][6][6] = 46, - [1][1][0][0][9][6] = 46, - [1][1][0][0][8][6] = 48, - [1][1][0][0][11][6] = 46, - [1][1][0][0][2][7] = 46, - [1][1][0][0][1][7] = 46, - [1][1][0][0][3][7] = 56, - [1][1][0][0][5][7] = 46, - [1][1][0][0][6][7] = 46, - [1][1][0][0][9][7] = 46, - [1][1][0][0][8][7] = 48, - [1][1][0][0][11][7] = 46, - [1][1][0][0][2][8] = 46, - [1][1][0][0][1][8] = 46, - [1][1][0][0][3][8] = 56, - [1][1][0][0][5][8] = 46, - [1][1][0][0][6][8] = 46, - [1][1][0][0][9][8] = 46, - [1][1][0][0][8][8] = 48, - [1][1][0][0][11][8] = 46, - [1][1][0][0][2][9] = 24, - [1][1][0][0][1][9] = 46, - [1][1][0][0][3][9] = 56, - [1][1][0][0][5][9] = 24, - [1][1][0][0][6][9] = 46, - [1][1][0][0][9][9] = 46, - [1][1][0][0][8][9] = 48, - [1][1][0][0][11][9] = 46, - [1][1][0][0][2][10] = 24, - [1][1][0][0][1][10] = 46, - [1][1][0][0][3][10] = 56, - [1][1][0][0][5][10] = 24, - [1][1][0][0][6][10] = 46, - [1][1][0][0][9][10] = 46, - [1][1][0][0][8][10] = 48, - [1][1][0][0][11][10] = 46, - [1][1][0][0][2][11] = 127, - [1][1][0][0][1][11] = 127, - [1][1][0][0][3][11] = 127, - [1][1][0][0][5][11] = 127, - [1][1][0][0][6][11] = 127, - [1][1][0][0][9][11] = 127, - [1][1][0][0][8][11] = 127, - [1][1][0][0][11][11] = 127, - [1][1][0][0][2][12] = 127, - [1][1][0][0][1][12] = 127, - [1][1][0][0][3][12] = 127, - [1][1][0][0][5][12] = 127, - [1][1][0][0][6][12] = 127, - [1][1][0][0][9][12] = 127, - 
[1][1][0][0][8][12] = 127, - [1][1][0][0][11][12] = 127, - [1][1][0][0][2][13] = 127, - [1][1][0][0][1][13] = 127, - [1][1][0][0][3][13] = 127, - [1][1][0][0][5][13] = 127, - [1][1][0][0][6][13] = 127, - [1][1][0][0][9][13] = 127, - [1][1][0][0][8][13] = 127, - [1][1][0][0][11][13] = 127, - [0][0][1][0][2][0] = 66, - [0][0][1][0][1][0] = 58, - [0][0][1][0][3][0] = 76, - [0][0][1][0][5][0] = 66, - [0][0][1][0][6][0] = 58, - [0][0][1][0][9][0] = 58, - [0][0][1][0][8][0] = 60, - [0][0][1][0][11][0] = 58, - [0][0][1][0][2][1] = 66, - [0][0][1][0][1][1] = 58, - [0][0][1][0][3][1] = 76, - [0][0][1][0][5][1] = 66, - [0][0][1][0][6][1] = 58, - [0][0][1][0][9][1] = 58, - [0][0][1][0][8][1] = 60, - [0][0][1][0][11][1] = 58, - [0][0][1][0][2][2] = 70, - [0][0][1][0][1][2] = 58, - [0][0][1][0][3][2] = 76, - [0][0][1][0][5][2] = 70, - [0][0][1][0][6][2] = 58, - [0][0][1][0][9][2] = 58, - [0][0][1][0][8][2] = 60, - [0][0][1][0][11][2] = 58, - [0][0][1][0][2][3] = 74, - [0][0][1][0][1][3] = 58, - [0][0][1][0][3][3] = 76, - [0][0][1][0][5][3] = 74, - [0][0][1][0][6][3] = 58, - [0][0][1][0][9][3] = 58, - [0][0][1][0][8][3] = 60, - [0][0][1][0][11][3] = 58, - [0][0][1][0][2][4] = 78, - [0][0][1][0][1][4] = 58, - [0][0][1][0][3][4] = 76, - [0][0][1][0][5][4] = 78, - [0][0][1][0][6][4] = 58, - [0][0][1][0][9][4] = 58, - [0][0][1][0][8][4] = 60, - [0][0][1][0][11][4] = 58, - [0][0][1][0][2][5] = 78, - [0][0][1][0][1][5] = 58, - [0][0][1][0][3][5] = 76, - [0][0][1][0][5][5] = 78, - [0][0][1][0][6][5] = 58, - [0][0][1][0][9][5] = 58, - [0][0][1][0][8][5] = 60, - [0][0][1][0][11][5] = 58, - [0][0][1][0][2][6] = 78, - [0][0][1][0][1][6] = 58, - [0][0][1][0][3][6] = 76, - [0][0][1][0][5][6] = 78, - [0][0][1][0][6][6] = 58, - [0][0][1][0][9][6] = 58, - [0][0][1][0][8][6] = 60, - [0][0][1][0][11][6] = 58, - [0][0][1][0][2][7] = 74, - [0][0][1][0][1][7] = 58, - [0][0][1][0][3][7] = 76, - [0][0][1][0][5][7] = 74, - [0][0][1][0][6][7] = 58, - [0][0][1][0][9][7] = 58, - [0][0][1][0][8][7] = 60, - [0][0][1][0][11][7] = 58, - [0][0][1][0][2][8] = 70, - [0][0][1][0][1][8] = 58, - [0][0][1][0][3][8] = 76, - [0][0][1][0][5][8] = 70, - [0][0][1][0][6][8] = 58, - [0][0][1][0][9][8] = 58, - [0][0][1][0][8][8] = 60, - [0][0][1][0][11][8] = 58, - [0][0][1][0][2][9] = 66, - [0][0][1][0][1][9] = 58, - [0][0][1][0][3][9] = 76, - [0][0][1][0][5][9] = 66, - [0][0][1][0][6][9] = 58, - [0][0][1][0][9][9] = 58, - [0][0][1][0][8][9] = 60, - [0][0][1][0][11][9] = 58, - [0][0][1][0][2][10] = 66, - [0][0][1][0][1][10] = 58, - [0][0][1][0][3][10] = 76, - [0][0][1][0][5][10] = 66, - [0][0][1][0][6][10] = 58, - [0][0][1][0][9][10] = 58, - [0][0][1][0][8][10] = 60, - [0][0][1][0][11][10] = 58, - [0][0][1][0][2][11] = 56, - [0][0][1][0][1][11] = 58, - [0][0][1][0][3][11] = 76, - [0][0][1][0][5][11] = 56, - [0][0][1][0][6][11] = 58, - [0][0][1][0][9][11] = 58, - [0][0][1][0][8][11] = 60, - [0][0][1][0][11][11] = 58, - [0][0][1][0][2][12] = 52, - [0][0][1][0][1][12] = 58, - [0][0][1][0][3][12] = 76, - [0][0][1][0][5][12] = 52, - [0][0][1][0][6][12] = 58, - [0][0][1][0][9][12] = 58, - [0][0][1][0][8][12] = 60, - [0][0][1][0][11][12] = 58, - [0][0][1][0][2][13] = 127, - [0][0][1][0][1][13] = 127, - [0][0][1][0][3][13] = 127, - [0][0][1][0][5][13] = 127, - [0][0][1][0][6][13] = 127, - [0][0][1][0][9][13] = 127, - [0][0][1][0][8][13] = 127, - [0][0][1][0][11][13] = 127, - [0][1][1][0][2][0] = 62, - [0][1][1][0][1][0] = 46, - [0][1][1][0][3][0] = 64, - [0][1][1][0][5][0] = 62, - [0][1][1][0][6][0] = 46, - [0][1][1][0][9][0] = 46, - [0][1][1][0][8][0] = 
[~2700 flattened rtw89 2.4 GHz (channels 0-13) power-limit table entries: the removed rows index the regulatory domain (fifth subscript) by raw number, e.g. "- [0][1][1][0][11][0] = 46,"; the added rows key the same entries by the named enum constants RTW89_WW, RTW89_FCC, RTW89_ETSI, RTW89_MKK, RTW89_IC, RTW89_KCC, RTW89_ACMA, RTW89_CHILE, RTW89_UKRAINE, RTW89_MEXICO, RTW89_CN and RTW89_QATAR, e.g. "+ [0][0][0][0][RTW89_WW][0] = 56," followed by "+ [0][0][0][0][RTW89_FCC][0] = 76," etc.; the new worldwide RTW89_WW rows carry the minimum of the per-domain values, with 127 marking disabled channels and 0 where no domain defines a limit.]
[0][1][2][1][RTW89_FCC][6] = 76, + [0][1][2][1][RTW89_ETSI][6] = 34, + [0][1][2][1][RTW89_MKK][6] = 64, + [0][1][2][1][RTW89_IC][6] = 76, + [0][1][2][1][RTW89_KCC][6] = 70, + [0][1][2][1][RTW89_ACMA][6] = 34, + [0][1][2][1][RTW89_CHILE][6] = 36, + [0][1][2][1][RTW89_UKRAINE][6] = 34, + [0][1][2][1][RTW89_MEXICO][6] = 76, + [0][1][2][1][RTW89_CN][6] = 34, + [0][1][2][1][RTW89_QATAR][6] = 34, + [0][1][2][1][RTW89_FCC][7] = 68, + [0][1][2][1][RTW89_ETSI][7] = 34, + [0][1][2][1][RTW89_MKK][7] = 64, + [0][1][2][1][RTW89_IC][7] = 68, + [0][1][2][1][RTW89_KCC][7] = 70, + [0][1][2][1][RTW89_ACMA][7] = 34, + [0][1][2][1][RTW89_CHILE][7] = 36, + [0][1][2][1][RTW89_UKRAINE][7] = 34, + [0][1][2][1][RTW89_MEXICO][7] = 68, + [0][1][2][1][RTW89_CN][7] = 34, + [0][1][2][1][RTW89_QATAR][7] = 34, + [0][1][2][1][RTW89_FCC][8] = 64, + [0][1][2][1][RTW89_ETSI][8] = 34, + [0][1][2][1][RTW89_MKK][8] = 64, + [0][1][2][1][RTW89_IC][8] = 64, + [0][1][2][1][RTW89_KCC][8] = 70, + [0][1][2][1][RTW89_ACMA][8] = 34, + [0][1][2][1][RTW89_CHILE][8] = 36, + [0][1][2][1][RTW89_UKRAINE][8] = 34, + [0][1][2][1][RTW89_MEXICO][8] = 64, + [0][1][2][1][RTW89_CN][8] = 34, + [0][1][2][1][RTW89_QATAR][8] = 34, + [0][1][2][1][RTW89_FCC][9] = 60, + [0][1][2][1][RTW89_ETSI][9] = 34, + [0][1][2][1][RTW89_MKK][9] = 64, + [0][1][2][1][RTW89_IC][9] = 60, + [0][1][2][1][RTW89_KCC][9] = 70, + [0][1][2][1][RTW89_ACMA][9] = 34, + [0][1][2][1][RTW89_CHILE][9] = 36, + [0][1][2][1][RTW89_UKRAINE][9] = 34, + [0][1][2][1][RTW89_MEXICO][9] = 60, + [0][1][2][1][RTW89_CN][9] = 34, + [0][1][2][1][RTW89_QATAR][9] = 34, + [0][1][2][1][RTW89_FCC][10] = 60, + [0][1][2][1][RTW89_ETSI][10] = 34, + [0][1][2][1][RTW89_MKK][10] = 64, + [0][1][2][1][RTW89_IC][10] = 60, + [0][1][2][1][RTW89_KCC][10] = 70, + [0][1][2][1][RTW89_ACMA][10] = 34, + [0][1][2][1][RTW89_CHILE][10] = 36, + [0][1][2][1][RTW89_UKRAINE][10] = 34, + [0][1][2][1][RTW89_MEXICO][10] = 60, + [0][1][2][1][RTW89_CN][10] = 34, + [0][1][2][1][RTW89_QATAR][10] = 34, + [0][1][2][1][RTW89_FCC][11] = 42, + [0][1][2][1][RTW89_ETSI][11] = 34, + [0][1][2][1][RTW89_MKK][11] = 64, + [0][1][2][1][RTW89_IC][11] = 42, + [0][1][2][1][RTW89_KCC][11] = 70, + [0][1][2][1][RTW89_ACMA][11] = 34, + [0][1][2][1][RTW89_CHILE][11] = 36, + [0][1][2][1][RTW89_UKRAINE][11] = 34, + [0][1][2][1][RTW89_MEXICO][11] = 42, + [0][1][2][1][RTW89_CN][11] = 34, + [0][1][2][1][RTW89_QATAR][11] = 34, + [0][1][2][1][RTW89_FCC][12] = 40, + [0][1][2][1][RTW89_ETSI][12] = 34, + [0][1][2][1][RTW89_MKK][12] = 64, + [0][1][2][1][RTW89_IC][12] = 40, + [0][1][2][1][RTW89_KCC][12] = 68, + [0][1][2][1][RTW89_ACMA][12] = 34, + [0][1][2][1][RTW89_CHILE][12] = 36, + [0][1][2][1][RTW89_UKRAINE][12] = 34, + [0][1][2][1][RTW89_MEXICO][12] = 40, + [0][1][2][1][RTW89_CN][12] = 34, + [0][1][2][1][RTW89_QATAR][12] = 34, + [0][1][2][1][RTW89_FCC][13] = 127, + [0][1][2][1][RTW89_ETSI][13] = 127, + [0][1][2][1][RTW89_MKK][13] = 127, + [0][1][2][1][RTW89_IC][13] = 127, + [0][1][2][1][RTW89_KCC][13] = 127, + [0][1][2][1][RTW89_ACMA][13] = 127, + [0][1][2][1][RTW89_CHILE][13] = 127, + [0][1][2][1][RTW89_UKRAINE][13] = 127, + [0][1][2][1][RTW89_MEXICO][13] = 127, + [0][1][2][1][RTW89_CN][13] = 127, + [0][1][2][1][RTW89_QATAR][13] = 127, + [1][0][2][0][RTW89_FCC][0] = 127, + [1][0][2][0][RTW89_ETSI][0] = 127, + [1][0][2][0][RTW89_MKK][0] = 127, + [1][0][2][0][RTW89_IC][0] = 127, + [1][0][2][0][RTW89_KCC][0] = 127, + [1][0][2][0][RTW89_ACMA][0] = 127, + [1][0][2][0][RTW89_CHILE][0] = 127, + [1][0][2][0][RTW89_UKRAINE][0] = 127, + [1][0][2][0][RTW89_MEXICO][0] = 127, 
+ [1][0][2][0][RTW89_CN][0] = 127, + [1][0][2][0][RTW89_QATAR][0] = 127, + [1][0][2][0][RTW89_FCC][1] = 127, + [1][0][2][0][RTW89_ETSI][1] = 127, + [1][0][2][0][RTW89_MKK][1] = 127, + [1][0][2][0][RTW89_IC][1] = 127, + [1][0][2][0][RTW89_KCC][1] = 127, + [1][0][2][0][RTW89_ACMA][1] = 127, + [1][0][2][0][RTW89_CHILE][1] = 127, + [1][0][2][0][RTW89_UKRAINE][1] = 127, + [1][0][2][0][RTW89_MEXICO][1] = 127, + [1][0][2][0][RTW89_CN][1] = 127, + [1][0][2][0][RTW89_QATAR][1] = 127, + [1][0][2][0][RTW89_FCC][2] = 56, + [1][0][2][0][RTW89_ETSI][2] = 58, + [1][0][2][0][RTW89_MKK][2] = 68, + [1][0][2][0][RTW89_IC][2] = 56, + [1][0][2][0][RTW89_KCC][2] = 68, + [1][0][2][0][RTW89_ACMA][2] = 58, + [1][0][2][0][RTW89_CHILE][2] = 56, + [1][0][2][0][RTW89_UKRAINE][2] = 58, + [1][0][2][0][RTW89_MEXICO][2] = 56, + [1][0][2][0][RTW89_CN][2] = 58, + [1][0][2][0][RTW89_QATAR][2] = 58, + [1][0][2][0][RTW89_FCC][3] = 56, + [1][0][2][0][RTW89_ETSI][3] = 58, + [1][0][2][0][RTW89_MKK][3] = 68, + [1][0][2][0][RTW89_IC][3] = 56, + [1][0][2][0][RTW89_KCC][3] = 68, + [1][0][2][0][RTW89_ACMA][3] = 58, + [1][0][2][0][RTW89_CHILE][3] = 56, + [1][0][2][0][RTW89_UKRAINE][3] = 58, + [1][0][2][0][RTW89_MEXICO][3] = 56, + [1][0][2][0][RTW89_CN][3] = 58, + [1][0][2][0][RTW89_QATAR][3] = 58, + [1][0][2][0][RTW89_FCC][4] = 60, + [1][0][2][0][RTW89_ETSI][4] = 58, + [1][0][2][0][RTW89_MKK][4] = 68, + [1][0][2][0][RTW89_IC][4] = 60, + [1][0][2][0][RTW89_KCC][4] = 68, + [1][0][2][0][RTW89_ACMA][4] = 58, + [1][0][2][0][RTW89_CHILE][4] = 60, + [1][0][2][0][RTW89_UKRAINE][4] = 58, + [1][0][2][0][RTW89_MEXICO][4] = 60, + [1][0][2][0][RTW89_CN][4] = 58, + [1][0][2][0][RTW89_QATAR][4] = 58, + [1][0][2][0][RTW89_FCC][5] = 64, + [1][0][2][0][RTW89_ETSI][5] = 58, + [1][0][2][0][RTW89_MKK][5] = 68, + [1][0][2][0][RTW89_IC][5] = 64, + [1][0][2][0][RTW89_KCC][5] = 68, + [1][0][2][0][RTW89_ACMA][5] = 58, + [1][0][2][0][RTW89_CHILE][5] = 60, + [1][0][2][0][RTW89_UKRAINE][5] = 58, + [1][0][2][0][RTW89_MEXICO][5] = 64, + [1][0][2][0][RTW89_CN][5] = 58, + [1][0][2][0][RTW89_QATAR][5] = 58, + [1][0][2][0][RTW89_FCC][6] = 54, + [1][0][2][0][RTW89_ETSI][6] = 58, + [1][0][2][0][RTW89_MKK][6] = 68, + [1][0][2][0][RTW89_IC][6] = 54, + [1][0][2][0][RTW89_KCC][6] = 68, + [1][0][2][0][RTW89_ACMA][6] = 58, + [1][0][2][0][RTW89_CHILE][6] = 54, + [1][0][2][0][RTW89_UKRAINE][6] = 58, + [1][0][2][0][RTW89_MEXICO][6] = 54, + [1][0][2][0][RTW89_CN][6] = 58, + [1][0][2][0][RTW89_QATAR][6] = 58, + [1][0][2][0][RTW89_FCC][7] = 50, + [1][0][2][0][RTW89_ETSI][7] = 58, + [1][0][2][0][RTW89_MKK][7] = 68, + [1][0][2][0][RTW89_IC][7] = 50, + [1][0][2][0][RTW89_KCC][7] = 68, + [1][0][2][0][RTW89_ACMA][7] = 58, + [1][0][2][0][RTW89_CHILE][7] = 50, + [1][0][2][0][RTW89_UKRAINE][7] = 58, + [1][0][2][0][RTW89_MEXICO][7] = 50, + [1][0][2][0][RTW89_CN][7] = 58, + [1][0][2][0][RTW89_QATAR][7] = 58, + [1][0][2][0][RTW89_FCC][8] = 50, + [1][0][2][0][RTW89_ETSI][8] = 58, + [1][0][2][0][RTW89_MKK][8] = 68, + [1][0][2][0][RTW89_IC][8] = 50, + [1][0][2][0][RTW89_KCC][8] = 68, + [1][0][2][0][RTW89_ACMA][8] = 58, + [1][0][2][0][RTW89_CHILE][8] = 50, + [1][0][2][0][RTW89_UKRAINE][8] = 58, + [1][0][2][0][RTW89_MEXICO][8] = 50, + [1][0][2][0][RTW89_CN][8] = 58, + [1][0][2][0][RTW89_QATAR][8] = 58, + [1][0][2][0][RTW89_FCC][9] = 42, + [1][0][2][0][RTW89_ETSI][9] = 58, + [1][0][2][0][RTW89_MKK][9] = 68, + [1][0][2][0][RTW89_IC][9] = 42, + [1][0][2][0][RTW89_KCC][9] = 68, + [1][0][2][0][RTW89_ACMA][9] = 58, + [1][0][2][0][RTW89_CHILE][9] = 42, + [1][0][2][0][RTW89_UKRAINE][9] = 58, + 
[1][0][2][0][RTW89_MEXICO][9] = 42, + [1][0][2][0][RTW89_CN][9] = 58, + [1][0][2][0][RTW89_QATAR][9] = 58, + [1][0][2][0][RTW89_FCC][10] = 40, + [1][0][2][0][RTW89_ETSI][10] = 58, + [1][0][2][0][RTW89_MKK][10] = 68, + [1][0][2][0][RTW89_IC][10] = 40, + [1][0][2][0][RTW89_KCC][10] = 68, + [1][0][2][0][RTW89_ACMA][10] = 58, + [1][0][2][0][RTW89_CHILE][10] = 40, + [1][0][2][0][RTW89_UKRAINE][10] = 58, + [1][0][2][0][RTW89_MEXICO][10] = 40, + [1][0][2][0][RTW89_CN][10] = 58, + [1][0][2][0][RTW89_QATAR][10] = 58, + [1][0][2][0][RTW89_FCC][11] = 127, + [1][0][2][0][RTW89_ETSI][11] = 127, + [1][0][2][0][RTW89_MKK][11] = 127, + [1][0][2][0][RTW89_IC][11] = 127, + [1][0][2][0][RTW89_KCC][11] = 127, + [1][0][2][0][RTW89_ACMA][11] = 127, + [1][0][2][0][RTW89_CHILE][11] = 127, + [1][0][2][0][RTW89_UKRAINE][11] = 127, + [1][0][2][0][RTW89_MEXICO][11] = 127, + [1][0][2][0][RTW89_CN][11] = 127, + [1][0][2][0][RTW89_QATAR][11] = 127, + [1][0][2][0][RTW89_FCC][12] = 127, + [1][0][2][0][RTW89_ETSI][12] = 127, + [1][0][2][0][RTW89_MKK][12] = 127, + [1][0][2][0][RTW89_IC][12] = 127, + [1][0][2][0][RTW89_KCC][12] = 127, + [1][0][2][0][RTW89_ACMA][12] = 127, + [1][0][2][0][RTW89_CHILE][12] = 127, + [1][0][2][0][RTW89_UKRAINE][12] = 127, + [1][0][2][0][RTW89_MEXICO][12] = 127, + [1][0][2][0][RTW89_CN][12] = 127, + [1][0][2][0][RTW89_QATAR][12] = 127, + [1][0][2][0][RTW89_FCC][13] = 127, + [1][0][2][0][RTW89_ETSI][13] = 127, + [1][0][2][0][RTW89_MKK][13] = 127, + [1][0][2][0][RTW89_IC][13] = 127, + [1][0][2][0][RTW89_KCC][13] = 127, + [1][0][2][0][RTW89_ACMA][13] = 127, + [1][0][2][0][RTW89_CHILE][13] = 127, + [1][0][2][0][RTW89_UKRAINE][13] = 127, + [1][0][2][0][RTW89_MEXICO][13] = 127, + [1][0][2][0][RTW89_CN][13] = 127, + [1][0][2][0][RTW89_QATAR][13] = 127, + [1][1][2][0][RTW89_FCC][0] = 127, + [1][1][2][0][RTW89_ETSI][0] = 127, + [1][1][2][0][RTW89_MKK][0] = 127, + [1][1][2][0][RTW89_IC][0] = 127, + [1][1][2][0][RTW89_KCC][0] = 127, + [1][1][2][0][RTW89_ACMA][0] = 127, + [1][1][2][0][RTW89_CHILE][0] = 127, + [1][1][2][0][RTW89_UKRAINE][0] = 127, + [1][1][2][0][RTW89_MEXICO][0] = 127, + [1][1][2][0][RTW89_CN][0] = 127, + [1][1][2][0][RTW89_QATAR][0] = 127, + [1][1][2][0][RTW89_FCC][1] = 127, + [1][1][2][0][RTW89_ETSI][1] = 127, + [1][1][2][0][RTW89_MKK][1] = 127, + [1][1][2][0][RTW89_IC][1] = 127, + [1][1][2][0][RTW89_KCC][1] = 127, + [1][1][2][0][RTW89_ACMA][1] = 127, + [1][1][2][0][RTW89_CHILE][1] = 127, + [1][1][2][0][RTW89_UKRAINE][1] = 127, + [1][1][2][0][RTW89_MEXICO][1] = 127, + [1][1][2][0][RTW89_CN][1] = 127, + [1][1][2][0][RTW89_QATAR][1] = 127, + [1][1][2][0][RTW89_FCC][2] = 52, + [1][1][2][0][RTW89_ETSI][2] = 46, + [1][1][2][0][RTW89_MKK][2] = 64, + [1][1][2][0][RTW89_IC][2] = 52, + [1][1][2][0][RTW89_KCC][2] = 66, + [1][1][2][0][RTW89_ACMA][2] = 46, + [1][1][2][0][RTW89_CHILE][2] = 48, + [1][1][2][0][RTW89_UKRAINE][2] = 46, + [1][1][2][0][RTW89_MEXICO][2] = 52, + [1][1][2][0][RTW89_CN][2] = 46, + [1][1][2][0][RTW89_QATAR][2] = 46, + [1][1][2][0][RTW89_FCC][3] = 52, + [1][1][2][0][RTW89_ETSI][3] = 46, + [1][1][2][0][RTW89_MKK][3] = 64, + [1][1][2][0][RTW89_IC][3] = 52, + [1][1][2][0][RTW89_KCC][3] = 68, + [1][1][2][0][RTW89_ACMA][3] = 46, + [1][1][2][0][RTW89_CHILE][3] = 48, + [1][1][2][0][RTW89_UKRAINE][3] = 46, + [1][1][2][0][RTW89_MEXICO][3] = 52, + [1][1][2][0][RTW89_CN][3] = 46, + [1][1][2][0][RTW89_QATAR][3] = 46, + [1][1][2][0][RTW89_FCC][4] = 56, + [1][1][2][0][RTW89_ETSI][4] = 46, + [1][1][2][0][RTW89_MKK][4] = 64, + [1][1][2][0][RTW89_IC][4] = 56, + [1][1][2][0][RTW89_KCC][4] = 68, + 
[1][1][2][0][RTW89_ACMA][4] = 46, + [1][1][2][0][RTW89_CHILE][4] = 48, + [1][1][2][0][RTW89_UKRAINE][4] = 46, + [1][1][2][0][RTW89_MEXICO][4] = 56, + [1][1][2][0][RTW89_CN][4] = 46, + [1][1][2][0][RTW89_QATAR][4] = 46, + [1][1][2][0][RTW89_FCC][5] = 60, + [1][1][2][0][RTW89_ETSI][5] = 46, + [1][1][2][0][RTW89_MKK][5] = 64, + [1][1][2][0][RTW89_IC][5] = 60, + [1][1][2][0][RTW89_KCC][5] = 68, + [1][1][2][0][RTW89_ACMA][5] = 46, + [1][1][2][0][RTW89_CHILE][5] = 48, + [1][1][2][0][RTW89_UKRAINE][5] = 46, + [1][1][2][0][RTW89_MEXICO][5] = 60, + [1][1][2][0][RTW89_CN][5] = 46, + [1][1][2][0][RTW89_QATAR][5] = 46, + [1][1][2][0][RTW89_FCC][6] = 54, + [1][1][2][0][RTW89_ETSI][6] = 46, + [1][1][2][0][RTW89_MKK][6] = 64, + [1][1][2][0][RTW89_IC][6] = 52, + [1][1][2][0][RTW89_KCC][6] = 68, + [1][1][2][0][RTW89_ACMA][6] = 46, + [1][1][2][0][RTW89_CHILE][6] = 48, + [1][1][2][0][RTW89_UKRAINE][6] = 46, + [1][1][2][0][RTW89_MEXICO][6] = 54, + [1][1][2][0][RTW89_CN][6] = 46, + [1][1][2][0][RTW89_QATAR][6] = 46, + [1][1][2][0][RTW89_FCC][7] = 50, + [1][1][2][0][RTW89_ETSI][7] = 46, + [1][1][2][0][RTW89_MKK][7] = 64, + [1][1][2][0][RTW89_IC][7] = 48, + [1][1][2][0][RTW89_KCC][7] = 68, + [1][1][2][0][RTW89_ACMA][7] = 46, + [1][1][2][0][RTW89_CHILE][7] = 48, + [1][1][2][0][RTW89_UKRAINE][7] = 46, + [1][1][2][0][RTW89_MEXICO][7] = 50, + [1][1][2][0][RTW89_CN][7] = 46, + [1][1][2][0][RTW89_QATAR][7] = 46, + [1][1][2][0][RTW89_FCC][8] = 50, + [1][1][2][0][RTW89_ETSI][8] = 46, + [1][1][2][0][RTW89_MKK][8] = 64, + [1][1][2][0][RTW89_IC][8] = 48, + [1][1][2][0][RTW89_KCC][8] = 68, + [1][1][2][0][RTW89_ACMA][8] = 46, + [1][1][2][0][RTW89_CHILE][8] = 48, + [1][1][2][0][RTW89_UKRAINE][8] = 46, + [1][1][2][0][RTW89_MEXICO][8] = 50, + [1][1][2][0][RTW89_CN][8] = 46, + [1][1][2][0][RTW89_QATAR][8] = 46, + [1][1][2][0][RTW89_FCC][9] = 38, + [1][1][2][0][RTW89_ETSI][9] = 46, + [1][1][2][0][RTW89_MKK][9] = 64, + [1][1][2][0][RTW89_IC][9] = 38, + [1][1][2][0][RTW89_KCC][9] = 68, + [1][1][2][0][RTW89_ACMA][9] = 46, + [1][1][2][0][RTW89_CHILE][9] = 38, + [1][1][2][0][RTW89_UKRAINE][9] = 46, + [1][1][2][0][RTW89_MEXICO][9] = 38, + [1][1][2][0][RTW89_CN][9] = 46, + [1][1][2][0][RTW89_QATAR][9] = 46, + [1][1][2][0][RTW89_FCC][10] = 36, + [1][1][2][0][RTW89_ETSI][10] = 46, + [1][1][2][0][RTW89_MKK][10] = 64, + [1][1][2][0][RTW89_IC][10] = 36, + [1][1][2][0][RTW89_KCC][10] = 66, + [1][1][2][0][RTW89_ACMA][10] = 46, + [1][1][2][0][RTW89_CHILE][10] = 36, + [1][1][2][0][RTW89_UKRAINE][10] = 46, + [1][1][2][0][RTW89_MEXICO][10] = 36, + [1][1][2][0][RTW89_CN][10] = 46, + [1][1][2][0][RTW89_QATAR][10] = 46, + [1][1][2][0][RTW89_FCC][11] = 127, + [1][1][2][0][RTW89_ETSI][11] = 127, + [1][1][2][0][RTW89_MKK][11] = 127, + [1][1][2][0][RTW89_IC][11] = 127, + [1][1][2][0][RTW89_KCC][11] = 127, + [1][1][2][0][RTW89_ACMA][11] = 127, + [1][1][2][0][RTW89_CHILE][11] = 127, + [1][1][2][0][RTW89_UKRAINE][11] = 127, + [1][1][2][0][RTW89_MEXICO][11] = 127, + [1][1][2][0][RTW89_CN][11] = 127, + [1][1][2][0][RTW89_QATAR][11] = 127, + [1][1][2][0][RTW89_FCC][12] = 127, + [1][1][2][0][RTW89_ETSI][12] = 127, + [1][1][2][0][RTW89_MKK][12] = 127, + [1][1][2][0][RTW89_IC][12] = 127, + [1][1][2][0][RTW89_KCC][12] = 127, + [1][1][2][0][RTW89_ACMA][12] = 127, + [1][1][2][0][RTW89_CHILE][12] = 127, + [1][1][2][0][RTW89_UKRAINE][12] = 127, + [1][1][2][0][RTW89_MEXICO][12] = 127, + [1][1][2][0][RTW89_CN][12] = 127, + [1][1][2][0][RTW89_QATAR][12] = 127, + [1][1][2][0][RTW89_FCC][13] = 127, + [1][1][2][0][RTW89_ETSI][13] = 127, + [1][1][2][0][RTW89_MKK][13] = 127, 
+ [1][1][2][0][RTW89_IC][13] = 127, + [1][1][2][0][RTW89_KCC][13] = 127, + [1][1][2][0][RTW89_ACMA][13] = 127, + [1][1][2][0][RTW89_CHILE][13] = 127, + [1][1][2][0][RTW89_UKRAINE][13] = 127, + [1][1][2][0][RTW89_MEXICO][13] = 127, + [1][1][2][0][RTW89_CN][13] = 127, + [1][1][2][0][RTW89_QATAR][13] = 127, + [1][1][2][1][RTW89_FCC][0] = 127, + [1][1][2][1][RTW89_ETSI][0] = 127, + [1][1][2][1][RTW89_MKK][0] = 127, + [1][1][2][1][RTW89_IC][0] = 127, + [1][1][2][1][RTW89_KCC][0] = 127, + [1][1][2][1][RTW89_ACMA][0] = 127, + [1][1][2][1][RTW89_CHILE][0] = 127, + [1][1][2][1][RTW89_UKRAINE][0] = 127, + [1][1][2][1][RTW89_MEXICO][0] = 127, + [1][1][2][1][RTW89_CN][0] = 127, + [1][1][2][1][RTW89_QATAR][0] = 127, + [1][1][2][1][RTW89_FCC][1] = 127, + [1][1][2][1][RTW89_ETSI][1] = 127, + [1][1][2][1][RTW89_MKK][1] = 127, + [1][1][2][1][RTW89_IC][1] = 127, + [1][1][2][1][RTW89_KCC][1] = 127, + [1][1][2][1][RTW89_ACMA][1] = 127, + [1][1][2][1][RTW89_CHILE][1] = 127, + [1][1][2][1][RTW89_UKRAINE][1] = 127, + [1][1][2][1][RTW89_MEXICO][1] = 127, + [1][1][2][1][RTW89_CN][1] = 127, + [1][1][2][1][RTW89_QATAR][1] = 127, + [1][1][2][1][RTW89_FCC][2] = 52, + [1][1][2][1][RTW89_ETSI][2] = 34, + [1][1][2][1][RTW89_MKK][2] = 64, + [1][1][2][1][RTW89_IC][2] = 52, + [1][1][2][1][RTW89_KCC][2] = 66, + [1][1][2][1][RTW89_ACMA][2] = 34, + [1][1][2][1][RTW89_CHILE][2] = 36, + [1][1][2][1][RTW89_UKRAINE][2] = 34, + [1][1][2][1][RTW89_MEXICO][2] = 52, + [1][1][2][1][RTW89_CN][2] = 34, + [1][1][2][1][RTW89_QATAR][2] = 34, + [1][1][2][1][RTW89_FCC][3] = 52, + [1][1][2][1][RTW89_ETSI][3] = 34, + [1][1][2][1][RTW89_MKK][3] = 64, + [1][1][2][1][RTW89_IC][3] = 52, + [1][1][2][1][RTW89_KCC][3] = 68, + [1][1][2][1][RTW89_ACMA][3] = 34, + [1][1][2][1][RTW89_CHILE][3] = 36, + [1][1][2][1][RTW89_UKRAINE][3] = 34, + [1][1][2][1][RTW89_MEXICO][3] = 52, + [1][1][2][1][RTW89_CN][3] = 34, + [1][1][2][1][RTW89_QATAR][3] = 34, + [1][1][2][1][RTW89_FCC][4] = 56, + [1][1][2][1][RTW89_ETSI][4] = 34, + [1][1][2][1][RTW89_MKK][4] = 64, + [1][1][2][1][RTW89_IC][4] = 56, + [1][1][2][1][RTW89_KCC][4] = 68, + [1][1][2][1][RTW89_ACMA][4] = 34, + [1][1][2][1][RTW89_CHILE][4] = 36, + [1][1][2][1][RTW89_UKRAINE][4] = 34, + [1][1][2][1][RTW89_MEXICO][4] = 56, + [1][1][2][1][RTW89_CN][4] = 34, + [1][1][2][1][RTW89_QATAR][4] = 34, + [1][1][2][1][RTW89_FCC][5] = 60, + [1][1][2][1][RTW89_ETSI][5] = 34, + [1][1][2][1][RTW89_MKK][5] = 64, + [1][1][2][1][RTW89_IC][5] = 60, + [1][1][2][1][RTW89_KCC][5] = 68, + [1][1][2][1][RTW89_ACMA][5] = 34, + [1][1][2][1][RTW89_CHILE][5] = 36, + [1][1][2][1][RTW89_UKRAINE][5] = 34, + [1][1][2][1][RTW89_MEXICO][5] = 60, + [1][1][2][1][RTW89_CN][5] = 34, + [1][1][2][1][RTW89_QATAR][5] = 34, + [1][1][2][1][RTW89_FCC][6] = 54, + [1][1][2][1][RTW89_ETSI][6] = 34, + [1][1][2][1][RTW89_MKK][6] = 64, + [1][1][2][1][RTW89_IC][6] = 52, + [1][1][2][1][RTW89_KCC][6] = 68, + [1][1][2][1][RTW89_ACMA][6] = 34, + [1][1][2][1][RTW89_CHILE][6] = 36, + [1][1][2][1][RTW89_UKRAINE][6] = 34, + [1][1][2][1][RTW89_MEXICO][6] = 54, + [1][1][2][1][RTW89_CN][6] = 34, + [1][1][2][1][RTW89_QATAR][6] = 34, + [1][1][2][1][RTW89_FCC][7] = 50, + [1][1][2][1][RTW89_ETSI][7] = 34, + [1][1][2][1][RTW89_MKK][7] = 64, + [1][1][2][1][RTW89_IC][7] = 48, + [1][1][2][1][RTW89_KCC][7] = 68, + [1][1][2][1][RTW89_ACMA][7] = 34, + [1][1][2][1][RTW89_CHILE][7] = 36, + [1][1][2][1][RTW89_UKRAINE][7] = 34, + [1][1][2][1][RTW89_MEXICO][7] = 50, + [1][1][2][1][RTW89_CN][7] = 34, + [1][1][2][1][RTW89_QATAR][7] = 34, + [1][1][2][1][RTW89_FCC][8] = 50, + 
+	[1][1][2][1][RTW89_ETSI][8] = 34,
+	[1][1][2][1][RTW89_MKK][8] = 64,
+	[1][1][2][1][RTW89_IC][8] = 48,
+	[1][1][2][1][RTW89_KCC][8] = 68,
+	[1][1][2][1][RTW89_ACMA][8] = 34,
+	[1][1][2][1][RTW89_CHILE][8] = 36,
+	[1][1][2][1][RTW89_UKRAINE][8] = 34,
+	[1][1][2][1][RTW89_MEXICO][8] = 50,
+	[1][1][2][1][RTW89_CN][8] = 34,
+	[1][1][2][1][RTW89_QATAR][8] = 34,
+	[1][1][2][1][RTW89_FCC][9] = 38,
+	[1][1][2][1][RTW89_ETSI][9] = 34,
+	[1][1][2][1][RTW89_MKK][9] = 64,
+	[1][1][2][1][RTW89_IC][9] = 38,
+	[1][1][2][1][RTW89_KCC][9] = 68,
+	[1][1][2][1][RTW89_ACMA][9] = 34,
+	[1][1][2][1][RTW89_CHILE][9] = 36,
+	[1][1][2][1][RTW89_UKRAINE][9] = 34,
+	[1][1][2][1][RTW89_MEXICO][9] = 38,
+	[1][1][2][1][RTW89_CN][9] = 34,
+	[1][1][2][1][RTW89_QATAR][9] = 34,
+	[1][1][2][1][RTW89_FCC][10] = 36,
+	[1][1][2][1][RTW89_ETSI][10] = 34,
+	[1][1][2][1][RTW89_MKK][10] = 64,
+	[1][1][2][1][RTW89_IC][10] = 36,
+	[1][1][2][1][RTW89_KCC][10] = 66,
+	[1][1][2][1][RTW89_ACMA][10] = 34,
+	[1][1][2][1][RTW89_CHILE][10] = 36,
+	[1][1][2][1][RTW89_UKRAINE][10] = 34,
+	[1][1][2][1][RTW89_MEXICO][10] = 36,
+	[1][1][2][1][RTW89_CN][10] = 34,
+	[1][1][2][1][RTW89_QATAR][10] = 34,
+	[1][1][2][1][RTW89_FCC][11] = 127,
+	[1][1][2][1][RTW89_ETSI][11] = 127,
+	[1][1][2][1][RTW89_MKK][11] = 127,
+	[1][1][2][1][RTW89_IC][11] = 127,
+	[1][1][2][1][RTW89_KCC][11] = 127,
+	[1][1][2][1][RTW89_ACMA][11] = 127,
+	[1][1][2][1][RTW89_CHILE][11] = 127,
+	[1][1][2][1][RTW89_UKRAINE][11] = 127,
+	[1][1][2][1][RTW89_MEXICO][11] = 127,
+	[1][1][2][1][RTW89_CN][11] = 127,
+	[1][1][2][1][RTW89_QATAR][11] = 127,
+	[1][1][2][1][RTW89_FCC][12] = 127,
+	[1][1][2][1][RTW89_ETSI][12] = 127,
+	[1][1][2][1][RTW89_MKK][12] = 127,
+	[1][1][2][1][RTW89_IC][12] = 127,
+	[1][1][2][1][RTW89_KCC][12] = 127,
+	[1][1][2][1][RTW89_ACMA][12] = 127,
+	[1][1][2][1][RTW89_CHILE][12] = 127,
+	[1][1][2][1][RTW89_UKRAINE][12] = 127,
+	[1][1][2][1][RTW89_MEXICO][12] = 127,
+	[1][1][2][1][RTW89_CN][12] = 127,
+	[1][1][2][1][RTW89_QATAR][12] = 127,
+	[1][1][2][1][RTW89_FCC][13] = 127,
+	[1][1][2][1][RTW89_ETSI][13] = 127,
+	[1][1][2][1][RTW89_MKK][13] = 127,
+	[1][1][2][1][RTW89_IC][13] = 127,
+	[1][1][2][1][RTW89_KCC][13] = 127,
+	[1][1][2][1][RTW89_ACMA][13] = 127,
+	[1][1][2][1][RTW89_CHILE][13] = 127,
+	[1][1][2][1][RTW89_UKRAINE][13] = 127,
+	[1][1][2][1][RTW89_MEXICO][13] = 127,
+	[1][1][2][1][RTW89_CN][13] = 127,
+	[1][1][2][1][RTW89_QATAR][13] = 127,
 };
 const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
 				 [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
 				 [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
-	[0][0][1][0][0][0] = 30,
-	[0][0][1][0][0][2] = 30,
-	[0][0][1][0][0][4] = 30,
-	[0][0][1][0][0][6] = 30,
-	[0][0][1][0][0][8] = 52,
-	[0][0][1][0][0][10] = 52,
-	[0][0][1][0][0][12] = 52,
-	[0][0][1][0][0][14] = 52,
-	[0][0][1][0][0][15] = 52,
-	[0][0][1][0][0][17] = 52,
-	[0][0][1][0][0][19] = 52,
-	[0][0][1][0][0][21] = 52,
-	[0][0][1][0][0][23] = 52,
-	[0][0][1][0][0][25] = 52,
-	[0][0][1][0][0][27] = 52,
-	[0][0][1][0][0][29] = 52,
-	[0][0][1][0][0][31] = 52,
-	[0][0][1][0][0][33] = 52,
-	[0][0][1][0][0][35] = 52,
-	[0][0][1][0][0][37] = 54,
-	[0][0][1][0][0][38] = 28,
-	[0][0][1][0][0][40] = 28,
-	[0][0][1][0][0][42] = 28,
-	[0][0][1][0][0][44] = 28,
-	[0][0][1][0][0][46] = 28,
-	[0][1][1][0][0][0] = 18,
-	[0][1][1][0][0][2] = 18,
-	[0][1][1][0][0][4] = 18,
-	[0][1][1][0][0][6] = 18,
-	[0][1][1][0][0][8] = 40,
-	[0][1][1][0][0][10] = 40,
-	[0][1][1][0][0][12] = 40,
-	[0][1][1][0][0][14] = 40,
-	[0][1][1][0][0][15] = 40,
-	[0][1][1][0][0][17] = 40,
-	[0][1][1][0][0][19] = 40,
- [0][1][1][0][0][21] = 40, - [0][1][1][0][0][23] = 40, - [0][1][1][0][0][25] = 40, - [0][1][1][0][0][27] = 40, - [0][1][1][0][0][29] = 40, - [0][1][1][0][0][31] = 40, - [0][1][1][0][0][33] = 40, - [0][1][1][0][0][35] = 40, - [0][1][1][0][0][37] = 42, - [0][1][1][0][0][38] = 16, - [0][1][1][0][0][40] = 16, - [0][1][1][0][0][42] = 16, - [0][1][1][0][0][44] = 16, - [0][1][1][0][0][46] = 16, - [0][0][2][0][0][0] = 30, - [0][0][2][0][0][2] = 30, - [0][0][2][0][0][4] = 30, - [0][0][2][0][0][6] = 30, - [0][0][2][0][0][8] = 52, - [0][0][2][0][0][10] = 52, - [0][0][2][0][0][12] = 52, - [0][0][2][0][0][14] = 52, - [0][0][2][0][0][15] = 52, - [0][0][2][0][0][17] = 52, - [0][0][2][0][0][19] = 52, - [0][0][2][0][0][21] = 52, - [0][0][2][0][0][23] = 52, - [0][0][2][0][0][25] = 52, - [0][0][2][0][0][27] = 52, - [0][0][2][0][0][29] = 52, - [0][0][2][0][0][31] = 52, - [0][0][2][0][0][33] = 52, - [0][0][2][0][0][35] = 52, - [0][0][2][0][0][37] = 54, - [0][0][2][0][0][38] = 28, - [0][0][2][0][0][40] = 28, - [0][0][2][0][0][42] = 28, - [0][0][2][0][0][44] = 28, - [0][0][2][0][0][46] = 28, - [0][1][2][0][0][0] = 18, - [0][1][2][0][0][2] = 18, - [0][1][2][0][0][4] = 18, - [0][1][2][0][0][6] = 18, - [0][1][2][0][0][8] = 40, - [0][1][2][0][0][10] = 40, - [0][1][2][0][0][12] = 40, - [0][1][2][0][0][14] = 40, - [0][1][2][0][0][15] = 40, - [0][1][2][0][0][17] = 40, - [0][1][2][0][0][19] = 40, - [0][1][2][0][0][21] = 40, - [0][1][2][0][0][23] = 40, - [0][1][2][0][0][25] = 40, - [0][1][2][0][0][27] = 40, - [0][1][2][0][0][29] = 40, - [0][1][2][0][0][31] = 40, - [0][1][2][0][0][33] = 40, - [0][1][2][0][0][35] = 40, - [0][1][2][0][0][37] = 42, - [0][1][2][0][0][38] = 16, - [0][1][2][0][0][40] = 16, - [0][1][2][0][0][42] = 16, - [0][1][2][0][0][44] = 16, - [0][1][2][0][0][46] = 16, - [0][1][2][1][0][0] = 6, - [0][1][2][1][0][2] = 6, - [0][1][2][1][0][4] = 6, - [0][1][2][1][0][6] = 6, - [0][1][2][1][0][8] = 28, - [0][1][2][1][0][10] = 28, - [0][1][2][1][0][12] = 28, - [0][1][2][1][0][14] = 28, - [0][1][2][1][0][15] = 28, - [0][1][2][1][0][17] = 28, - [0][1][2][1][0][19] = 28, - [0][1][2][1][0][21] = 28, - [0][1][2][1][0][23] = 28, - [0][1][2][1][0][25] = 28, - [0][1][2][1][0][27] = 28, - [0][1][2][1][0][29] = 28, - [0][1][2][1][0][31] = 28, - [0][1][2][1][0][33] = 28, - [0][1][2][1][0][35] = 28, - [0][1][2][1][0][37] = 30, - [0][1][2][1][0][38] = 4, - [0][1][2][1][0][40] = 4, - [0][1][2][1][0][42] = 4, - [0][1][2][1][0][44] = 4, - [0][1][2][1][0][46] = 4, - [1][0][2][0][0][1] = 30, - [1][0][2][0][0][5] = 30, - [1][0][2][0][0][9] = 52, - [1][0][2][0][0][13] = 52, - [1][0][2][0][0][16] = 52, - [1][0][2][0][0][20] = 52, - [1][0][2][0][0][24] = 52, - [1][0][2][0][0][28] = 52, - [1][0][2][0][0][32] = 52, - [1][0][2][0][0][36] = 54, - [1][0][2][0][0][39] = 28, - [1][0][2][0][0][43] = 28, - [1][1][2][0][0][1] = 18, - [1][1][2][0][0][5] = 18, - [1][1][2][0][0][9] = 40, - [1][1][2][0][0][13] = 40, - [1][1][2][0][0][16] = 40, - [1][1][2][0][0][20] = 40, - [1][1][2][0][0][24] = 40, - [1][1][2][0][0][28] = 40, - [1][1][2][0][0][32] = 40, - [1][1][2][0][0][36] = 42, - [1][1][2][0][0][39] = 16, - [1][1][2][0][0][43] = 16, - [1][1][2][1][0][1] = 6, - [1][1][2][1][0][5] = 6, - [1][1][2][1][0][9] = 28, - [1][1][2][1][0][13] = 28, - [1][1][2][1][0][16] = 28, - [1][1][2][1][0][20] = 28, - [1][1][2][1][0][24] = 28, - [1][1][2][1][0][28] = 28, - [1][1][2][1][0][32] = 28, - [1][1][2][1][0][36] = 30, - [1][1][2][1][0][39] = 4, - [1][1][2][1][0][43] = 4, - [2][0][2][0][0][3] = 30, - [2][0][2][0][0][11] = 52, - [2][0][2][0][0][18] = 52, - 
[2][0][2][0][0][26] = 52, - [2][0][2][0][0][34] = 54, - [2][0][2][0][0][41] = 28, - [2][1][2][0][0][3] = 18, - [2][1][2][0][0][11] = 40, - [2][1][2][0][0][18] = 40, - [2][1][2][0][0][26] = 40, - [2][1][2][0][0][34] = 42, - [2][1][2][0][0][41] = 16, - [2][1][2][1][0][3] = 6, - [2][1][2][1][0][11] = 28, - [2][1][2][1][0][18] = 28, - [2][1][2][1][0][26] = 28, - [2][1][2][1][0][34] = 30, - [2][1][2][1][0][41] = 4, - [0][0][1][0][2][0] = 76, - [0][0][1][0][1][0] = 58, - [0][0][1][0][3][0] = 62, - [0][0][1][0][5][0] = 62, - [0][0][1][0][6][0] = 58, - [0][0][1][0][9][0] = 58, - [0][0][1][0][8][0] = 30, - [0][0][1][0][11][0] = 52, - [0][0][1][0][2][2] = 76, - [0][0][1][0][1][2] = 58, - [0][0][1][0][3][2] = 62, - [0][0][1][0][5][2] = 62, - [0][0][1][0][6][2] = 58, - [0][0][1][0][9][2] = 58, - [0][0][1][0][8][2] = 30, - [0][0][1][0][11][2] = 52, - [0][0][1][0][2][4] = 76, - [0][0][1][0][1][4] = 58, - [0][0][1][0][3][4] = 62, - [0][0][1][0][5][4] = 62, - [0][0][1][0][6][4] = 58, - [0][0][1][0][9][4] = 58, - [0][0][1][0][8][4] = 30, - [0][0][1][0][11][4] = 52, - [0][0][1][0][2][6] = 76, - [0][0][1][0][1][6] = 58, - [0][0][1][0][3][6] = 62, - [0][0][1][0][5][6] = 62, - [0][0][1][0][6][6] = 54, - [0][0][1][0][9][6] = 58, - [0][0][1][0][8][6] = 30, - [0][0][1][0][11][6] = 52, - [0][0][1][0][2][8] = 76, - [0][0][1][0][1][8] = 58, - [0][0][1][0][3][8] = 62, - [0][0][1][0][5][8] = 64, - [0][0][1][0][6][8] = 58, - [0][0][1][0][9][8] = 58, - [0][0][1][0][8][8] = 54, - [0][0][1][0][11][8] = 52, - [0][0][1][0][2][10] = 76, - [0][0][1][0][1][10] = 58, - [0][0][1][0][3][10] = 62, - [0][0][1][0][5][10] = 64, - [0][0][1][0][6][10] = 58, - [0][0][1][0][9][10] = 58, - [0][0][1][0][8][10] = 54, - [0][0][1][0][11][10] = 52, - [0][0][1][0][2][12] = 76, - [0][0][1][0][1][12] = 58, - [0][0][1][0][3][12] = 62, - [0][0][1][0][5][12] = 64, - [0][0][1][0][6][12] = 58, - [0][0][1][0][9][12] = 58, - [0][0][1][0][8][12] = 54, - [0][0][1][0][11][12] = 52, - [0][0][1][0][2][14] = 76, - [0][0][1][0][1][14] = 58, - [0][0][1][0][3][14] = 62, - [0][0][1][0][5][14] = 64, - [0][0][1][0][6][14] = 58, - [0][0][1][0][9][14] = 58, - [0][0][1][0][8][14] = 54, - [0][0][1][0][11][14] = 52, - [0][0][1][0][2][15] = 76, - [0][0][1][0][1][15] = 58, - [0][0][1][0][3][15] = 76, - [0][0][1][0][5][15] = 76, - [0][0][1][0][6][15] = 58, - [0][0][1][0][9][15] = 58, - [0][0][1][0][8][15] = 54, - [0][0][1][0][11][15] = 52, - [0][0][1][0][2][17] = 76, - [0][0][1][0][1][17] = 58, - [0][0][1][0][3][17] = 76, - [0][0][1][0][5][17] = 76, - [0][0][1][0][6][17] = 58, - [0][0][1][0][9][17] = 58, - [0][0][1][0][8][17] = 54, - [0][0][1][0][11][17] = 52, - [0][0][1][0][2][19] = 76, - [0][0][1][0][1][19] = 58, - [0][0][1][0][3][19] = 76, - [0][0][1][0][5][19] = 76, - [0][0][1][0][6][19] = 58, - [0][0][1][0][9][19] = 58, - [0][0][1][0][8][19] = 54, - [0][0][1][0][11][19] = 52, - [0][0][1][0][2][21] = 76, - [0][0][1][0][1][21] = 58, - [0][0][1][0][3][21] = 76, - [0][0][1][0][5][21] = 76, - [0][0][1][0][6][21] = 58, - [0][0][1][0][9][21] = 58, - [0][0][1][0][8][21] = 54, - [0][0][1][0][11][21] = 52, - [0][0][1][0][2][23] = 76, - [0][0][1][0][1][23] = 58, - [0][0][1][0][3][23] = 76, - [0][0][1][0][5][23] = 76, - [0][0][1][0][6][23] = 58, - [0][0][1][0][9][23] = 58, - [0][0][1][0][8][23] = 54, - [0][0][1][0][11][23] = 52, - [0][0][1][0][2][25] = 76, - [0][0][1][0][1][25] = 58, - [0][0][1][0][3][25] = 76, - [0][0][1][0][5][25] = 127, - [0][0][1][0][6][25] = 58, - [0][0][1][0][9][25] = 127, - [0][0][1][0][8][25] = 54, - [0][0][1][0][11][25] = 52, - [0][0][1][0][2][27] = 76, 
- [0][0][1][0][1][27] = 58, - [0][0][1][0][3][27] = 76, - [0][0][1][0][5][27] = 127, - [0][0][1][0][6][27] = 58, - [0][0][1][0][9][27] = 127, - [0][0][1][0][8][27] = 54, - [0][0][1][0][11][27] = 52, - [0][0][1][0][2][29] = 76, - [0][0][1][0][1][29] = 58, - [0][0][1][0][3][29] = 76, - [0][0][1][0][5][29] = 127, - [0][0][1][0][6][29] = 58, - [0][0][1][0][9][29] = 127, - [0][0][1][0][8][29] = 54, - [0][0][1][0][11][29] = 52, - [0][0][1][0][2][31] = 76, - [0][0][1][0][1][31] = 58, - [0][0][1][0][3][31] = 76, - [0][0][1][0][5][31] = 76, - [0][0][1][0][6][31] = 58, - [0][0][1][0][9][31] = 58, - [0][0][1][0][8][31] = 54, - [0][0][1][0][11][31] = 52, - [0][0][1][0][2][33] = 76, - [0][0][1][0][1][33] = 58, - [0][0][1][0][3][33] = 76, - [0][0][1][0][5][33] = 76, - [0][0][1][0][6][33] = 58, - [0][0][1][0][9][33] = 58, - [0][0][1][0][8][33] = 54, - [0][0][1][0][11][33] = 52, - [0][0][1][0][2][35] = 74, - [0][0][1][0][1][35] = 58, - [0][0][1][0][3][35] = 76, - [0][0][1][0][5][35] = 74, - [0][0][1][0][6][35] = 58, - [0][0][1][0][9][35] = 58, - [0][0][1][0][8][35] = 54, - [0][0][1][0][11][35] = 52, - [0][0][1][0][2][37] = 76, - [0][0][1][0][1][37] = 127, - [0][0][1][0][3][37] = 76, - [0][0][1][0][5][37] = 76, - [0][0][1][0][6][37] = 58, - [0][0][1][0][9][37] = 76, - [0][0][1][0][8][37] = 54, - [0][0][1][0][11][37] = 127, - [0][0][1][0][2][38] = 76, - [0][0][1][0][1][38] = 28, - [0][0][1][0][3][38] = 127, - [0][0][1][0][5][38] = 76, - [0][0][1][0][6][38] = 28, - [0][0][1][0][9][38] = 76, - [0][0][1][0][8][38] = 54, - [0][0][1][0][11][38] = 52, - [0][0][1][0][2][40] = 76, - [0][0][1][0][1][40] = 28, - [0][0][1][0][3][40] = 127, - [0][0][1][0][5][40] = 76, - [0][0][1][0][6][40] = 28, - [0][0][1][0][9][40] = 76, - [0][0][1][0][8][40] = 54, - [0][0][1][0][11][40] = 52, - [0][0][1][0][2][42] = 76, - [0][0][1][0][1][42] = 28, - [0][0][1][0][3][42] = 127, - [0][0][1][0][5][42] = 76, - [0][0][1][0][6][42] = 28, - [0][0][1][0][9][42] = 76, - [0][0][1][0][8][42] = 54, - [0][0][1][0][11][42] = 52, - [0][0][1][0][2][44] = 76, - [0][0][1][0][1][44] = 28, - [0][0][1][0][3][44] = 127, - [0][0][1][0][5][44] = 76, - [0][0][1][0][6][44] = 28, - [0][0][1][0][9][44] = 76, - [0][0][1][0][8][44] = 54, - [0][0][1][0][11][44] = 52, - [0][0][1][0][2][46] = 76, - [0][0][1][0][1][46] = 28, - [0][0][1][0][3][46] = 127, - [0][0][1][0][5][46] = 76, - [0][0][1][0][6][46] = 28, - [0][0][1][0][9][46] = 76, - [0][0][1][0][8][46] = 54, - [0][0][1][0][11][46] = 52, - [0][1][1][0][2][0] = 68, - [0][1][1][0][1][0] = 46, - [0][1][1][0][3][0] = 50, - [0][1][1][0][5][0] = 40, - [0][1][1][0][6][0] = 46, - [0][1][1][0][9][0] = 46, - [0][1][1][0][8][0] = 18, - [0][1][1][0][11][0] = 40, - [0][1][1][0][2][2] = 68, - [0][1][1][0][1][2] = 46, - [0][1][1][0][3][2] = 50, - [0][1][1][0][5][2] = 40, - [0][1][1][0][6][2] = 46, - [0][1][1][0][9][2] = 46, - [0][1][1][0][8][2] = 18, - [0][1][1][0][11][2] = 40, - [0][1][1][0][2][4] = 68, - [0][1][1][0][1][4] = 46, - [0][1][1][0][3][4] = 50, - [0][1][1][0][5][4] = 40, - [0][1][1][0][6][4] = 46, - [0][1][1][0][9][4] = 46, - [0][1][1][0][8][4] = 18, - [0][1][1][0][11][4] = 40, - [0][1][1][0][2][6] = 68, - [0][1][1][0][1][6] = 46, - [0][1][1][0][3][6] = 50, - [0][1][1][0][5][6] = 40, - [0][1][1][0][6][6] = 36, - [0][1][1][0][9][6] = 46, - [0][1][1][0][8][6] = 18, - [0][1][1][0][11][6] = 40, - [0][1][1][0][2][8] = 68, - [0][1][1][0][1][8] = 46, - [0][1][1][0][3][8] = 50, - [0][1][1][0][5][8] = 52, - [0][1][1][0][6][8] = 46, - [0][1][1][0][9][8] = 46, - [0][1][1][0][8][8] = 42, - [0][1][1][0][11][8] = 40, - 
[0][1][1][0][2][10] = 68, - [0][1][1][0][1][10] = 46, - [0][1][1][0][3][10] = 50, - [0][1][1][0][5][10] = 52, - [0][1][1][0][6][10] = 46, - [0][1][1][0][9][10] = 46, - [0][1][1][0][8][10] = 42, - [0][1][1][0][11][10] = 40, - [0][1][1][0][2][12] = 68, - [0][1][1][0][1][12] = 46, - [0][1][1][0][3][12] = 50, - [0][1][1][0][5][12] = 52, - [0][1][1][0][6][12] = 46, - [0][1][1][0][9][12] = 46, - [0][1][1][0][8][12] = 42, - [0][1][1][0][11][12] = 40, - [0][1][1][0][2][14] = 68, - [0][1][1][0][1][14] = 46, - [0][1][1][0][3][14] = 50, - [0][1][1][0][5][14] = 52, - [0][1][1][0][6][14] = 46, - [0][1][1][0][9][14] = 46, - [0][1][1][0][8][14] = 42, - [0][1][1][0][11][14] = 40, - [0][1][1][0][2][15] = 68, - [0][1][1][0][1][15] = 46, - [0][1][1][0][3][15] = 70, - [0][1][1][0][5][15] = 68, - [0][1][1][0][6][15] = 46, - [0][1][1][0][9][15] = 46, - [0][1][1][0][8][15] = 42, - [0][1][1][0][11][15] = 40, - [0][1][1][0][2][17] = 68, - [0][1][1][0][1][17] = 46, - [0][1][1][0][3][17] = 70, - [0][1][1][0][5][17] = 68, - [0][1][1][0][6][17] = 46, - [0][1][1][0][9][17] = 46, - [0][1][1][0][8][17] = 42, - [0][1][1][0][11][17] = 40, - [0][1][1][0][2][19] = 68, - [0][1][1][0][1][19] = 46, - [0][1][1][0][3][19] = 70, - [0][1][1][0][5][19] = 68, - [0][1][1][0][6][19] = 46, - [0][1][1][0][9][19] = 46, - [0][1][1][0][8][19] = 42, - [0][1][1][0][11][19] = 40, - [0][1][1][0][2][21] = 68, - [0][1][1][0][1][21] = 46, - [0][1][1][0][3][21] = 70, - [0][1][1][0][5][21] = 68, - [0][1][1][0][6][21] = 46, - [0][1][1][0][9][21] = 46, - [0][1][1][0][8][21] = 42, - [0][1][1][0][11][21] = 40, - [0][1][1][0][2][23] = 68, - [0][1][1][0][1][23] = 46, - [0][1][1][0][3][23] = 70, - [0][1][1][0][5][23] = 68, - [0][1][1][0][6][23] = 46, - [0][1][1][0][9][23] = 46, - [0][1][1][0][8][23] = 42, - [0][1][1][0][11][23] = 40, - [0][1][1][0][2][25] = 68, - [0][1][1][0][1][25] = 46, - [0][1][1][0][3][25] = 70, - [0][1][1][0][5][25] = 127, - [0][1][1][0][6][25] = 46, - [0][1][1][0][9][25] = 127, - [0][1][1][0][8][25] = 42, - [0][1][1][0][11][25] = 40, - [0][1][1][0][2][27] = 68, - [0][1][1][0][1][27] = 46, - [0][1][1][0][3][27] = 70, - [0][1][1][0][5][27] = 127, - [0][1][1][0][6][27] = 46, - [0][1][1][0][9][27] = 127, - [0][1][1][0][8][27] = 42, - [0][1][1][0][11][27] = 40, - [0][1][1][0][2][29] = 68, - [0][1][1][0][1][29] = 46, - [0][1][1][0][3][29] = 70, - [0][1][1][0][5][29] = 127, - [0][1][1][0][6][29] = 46, - [0][1][1][0][9][29] = 127, - [0][1][1][0][8][29] = 42, - [0][1][1][0][11][29] = 40, - [0][1][1][0][2][31] = 68, - [0][1][1][0][1][31] = 46, - [0][1][1][0][3][31] = 70, - [0][1][1][0][5][31] = 68, - [0][1][1][0][6][31] = 46, - [0][1][1][0][9][31] = 46, - [0][1][1][0][8][31] = 42, - [0][1][1][0][11][31] = 40, - [0][1][1][0][2][33] = 68, - [0][1][1][0][1][33] = 46, - [0][1][1][0][3][33] = 70, - [0][1][1][0][5][33] = 68, - [0][1][1][0][6][33] = 46, - [0][1][1][0][9][33] = 46, - [0][1][1][0][8][33] = 42, - [0][1][1][0][11][33] = 40, - [0][1][1][0][2][35] = 66, - [0][1][1][0][1][35] = 46, - [0][1][1][0][3][35] = 70, - [0][1][1][0][5][35] = 66, - [0][1][1][0][6][35] = 46, - [0][1][1][0][9][35] = 46, - [0][1][1][0][8][35] = 42, - [0][1][1][0][11][35] = 40, - [0][1][1][0][2][37] = 68, - [0][1][1][0][1][37] = 127, - [0][1][1][0][3][37] = 70, - [0][1][1][0][5][37] = 68, - [0][1][1][0][6][37] = 46, - [0][1][1][0][9][37] = 68, - [0][1][1][0][8][37] = 42, - [0][1][1][0][11][37] = 127, - [0][1][1][0][2][38] = 76, - [0][1][1][0][1][38] = 16, - [0][1][1][0][3][38] = 127, - [0][1][1][0][5][38] = 76, - [0][1][1][0][6][38] = 16, - [0][1][1][0][9][38] = 76, - 
[0][1][1][0][8][38] = 42, - [0][1][1][0][11][38] = 40, - [0][1][1][0][2][40] = 76, - [0][1][1][0][1][40] = 16, - [0][1][1][0][3][40] = 127, - [0][1][1][0][5][40] = 76, - [0][1][1][0][6][40] = 16, - [0][1][1][0][9][40] = 76, - [0][1][1][0][8][40] = 42, - [0][1][1][0][11][40] = 40, - [0][1][1][0][2][42] = 76, - [0][1][1][0][1][42] = 16, - [0][1][1][0][3][42] = 127, - [0][1][1][0][5][42] = 76, - [0][1][1][0][6][42] = 16, - [0][1][1][0][9][42] = 76, - [0][1][1][0][8][42] = 42, - [0][1][1][0][11][42] = 40, - [0][1][1][0][2][44] = 76, - [0][1][1][0][1][44] = 16, - [0][1][1][0][3][44] = 127, - [0][1][1][0][5][44] = 76, - [0][1][1][0][6][44] = 16, - [0][1][1][0][9][44] = 76, - [0][1][1][0][8][44] = 42, - [0][1][1][0][11][44] = 40, - [0][1][1][0][2][46] = 76, - [0][1][1][0][1][46] = 16, - [0][1][1][0][3][46] = 127, - [0][1][1][0][5][46] = 76, - [0][1][1][0][6][46] = 16, - [0][1][1][0][9][46] = 76, - [0][1][1][0][8][46] = 42, - [0][1][1][0][11][46] = 40, - [0][0][2][0][2][0] = 76, - [0][0][2][0][1][0] = 58, - [0][0][2][0][3][0] = 62, - [0][0][2][0][5][0] = 62, - [0][0][2][0][6][0] = 58, - [0][0][2][0][9][0] = 58, - [0][0][2][0][8][0] = 30, - [0][0][2][0][11][0] = 52, - [0][0][2][0][2][2] = 76, - [0][0][2][0][1][2] = 58, - [0][0][2][0][3][2] = 62, - [0][0][2][0][5][2] = 62, - [0][0][2][0][6][2] = 58, - [0][0][2][0][9][2] = 58, - [0][0][2][0][8][2] = 30, - [0][0][2][0][11][2] = 52, - [0][0][2][0][2][4] = 76, - [0][0][2][0][1][4] = 58, - [0][0][2][0][3][4] = 62, - [0][0][2][0][5][4] = 62, - [0][0][2][0][6][4] = 58, - [0][0][2][0][9][4] = 58, - [0][0][2][0][8][4] = 30, - [0][0][2][0][11][4] = 52, - [0][0][2][0][2][6] = 76, - [0][0][2][0][1][6] = 58, - [0][0][2][0][3][6] = 62, - [0][0][2][0][5][6] = 62, - [0][0][2][0][6][6] = 54, - [0][0][2][0][9][6] = 58, - [0][0][2][0][8][6] = 30, - [0][0][2][0][11][6] = 52, - [0][0][2][0][2][8] = 76, - [0][0][2][0][1][8] = 58, - [0][0][2][0][3][8] = 62, - [0][0][2][0][5][8] = 64, - [0][0][2][0][6][8] = 58, - [0][0][2][0][9][8] = 58, - [0][0][2][0][8][8] = 54, - [0][0][2][0][11][8] = 52, - [0][0][2][0][2][10] = 76, - [0][0][2][0][1][10] = 58, - [0][0][2][0][3][10] = 62, - [0][0][2][0][5][10] = 64, - [0][0][2][0][6][10] = 58, - [0][0][2][0][9][10] = 58, - [0][0][2][0][8][10] = 54, - [0][0][2][0][11][10] = 52, - [0][0][2][0][2][12] = 76, - [0][0][2][0][1][12] = 58, - [0][0][2][0][3][12] = 62, - [0][0][2][0][5][12] = 64, - [0][0][2][0][6][12] = 58, - [0][0][2][0][9][12] = 58, - [0][0][2][0][8][12] = 54, - [0][0][2][0][11][12] = 52, - [0][0][2][0][2][14] = 76, - [0][0][2][0][1][14] = 58, - [0][0][2][0][3][14] = 62, - [0][0][2][0][5][14] = 64, - [0][0][2][0][6][14] = 58, - [0][0][2][0][9][14] = 58, - [0][0][2][0][8][14] = 54, - [0][0][2][0][11][14] = 52, - [0][0][2][0][2][15] = 74, - [0][0][2][0][1][15] = 58, - [0][0][2][0][3][15] = 76, - [0][0][2][0][5][15] = 74, - [0][0][2][0][6][15] = 58, - [0][0][2][0][9][15] = 58, - [0][0][2][0][8][15] = 54, - [0][0][2][0][11][15] = 52, - [0][0][2][0][2][17] = 76, - [0][0][2][0][1][17] = 58, - [0][0][2][0][3][17] = 76, - [0][0][2][0][5][17] = 76, - [0][0][2][0][6][17] = 58, - [0][0][2][0][9][17] = 58, - [0][0][2][0][8][17] = 54, - [0][0][2][0][11][17] = 52, - [0][0][2][0][2][19] = 76, - [0][0][2][0][1][19] = 58, - [0][0][2][0][3][19] = 76, - [0][0][2][0][5][19] = 76, - [0][0][2][0][6][19] = 58, - [0][0][2][0][9][19] = 58, - [0][0][2][0][8][19] = 54, - [0][0][2][0][11][19] = 52, - [0][0][2][0][2][21] = 76, - [0][0][2][0][1][21] = 58, - [0][0][2][0][3][21] = 76, - [0][0][2][0][5][21] = 76, - [0][0][2][0][6][21] = 58, - 
[0][0][2][0][9][21] = 58, - [0][0][2][0][8][21] = 54, - [0][0][2][0][11][21] = 52, - [0][0][2][0][2][23] = 76, - [0][0][2][0][1][23] = 58, - [0][0][2][0][3][23] = 76, - [0][0][2][0][5][23] = 76, - [0][0][2][0][6][23] = 58, - [0][0][2][0][9][23] = 58, - [0][0][2][0][8][23] = 54, - [0][0][2][0][11][23] = 52, - [0][0][2][0][2][25] = 76, - [0][0][2][0][1][25] = 58, - [0][0][2][0][3][25] = 76, - [0][0][2][0][5][25] = 127, - [0][0][2][0][6][25] = 58, - [0][0][2][0][9][25] = 127, - [0][0][2][0][8][25] = 54, - [0][0][2][0][11][25] = 52, - [0][0][2][0][2][27] = 76, - [0][0][2][0][1][27] = 58, - [0][0][2][0][3][27] = 76, - [0][0][2][0][5][27] = 127, - [0][0][2][0][6][27] = 58, - [0][0][2][0][9][27] = 127, - [0][0][2][0][8][27] = 54, - [0][0][2][0][11][27] = 52, - [0][0][2][0][2][29] = 76, - [0][0][2][0][1][29] = 58, - [0][0][2][0][3][29] = 76, - [0][0][2][0][5][29] = 127, - [0][0][2][0][6][29] = 58, - [0][0][2][0][9][29] = 127, - [0][0][2][0][8][29] = 54, - [0][0][2][0][11][29] = 52, - [0][0][2][0][2][31] = 76, - [0][0][2][0][1][31] = 58, - [0][0][2][0][3][31] = 76, - [0][0][2][0][5][31] = 76, - [0][0][2][0][6][31] = 58, - [0][0][2][0][9][31] = 58, - [0][0][2][0][8][31] = 54, - [0][0][2][0][11][31] = 52, - [0][0][2][0][2][33] = 76, - [0][0][2][0][1][33] = 58, - [0][0][2][0][3][33] = 76, - [0][0][2][0][5][33] = 76, - [0][0][2][0][6][33] = 58, - [0][0][2][0][9][33] = 58, - [0][0][2][0][8][33] = 54, - [0][0][2][0][11][33] = 52, - [0][0][2][0][2][35] = 70, - [0][0][2][0][1][35] = 58, - [0][0][2][0][3][35] = 76, - [0][0][2][0][5][35] = 70, - [0][0][2][0][6][35] = 58, - [0][0][2][0][9][35] = 58, - [0][0][2][0][8][35] = 54, - [0][0][2][0][11][35] = 52, - [0][0][2][0][2][37] = 76, - [0][0][2][0][1][37] = 127, - [0][0][2][0][3][37] = 76, - [0][0][2][0][5][37] = 76, - [0][0][2][0][6][37] = 58, - [0][0][2][0][9][37] = 76, - [0][0][2][0][8][37] = 54, - [0][0][2][0][11][37] = 127, - [0][0][2][0][2][38] = 76, - [0][0][2][0][1][38] = 28, - [0][0][2][0][3][38] = 127, - [0][0][2][0][5][38] = 76, - [0][0][2][0][6][38] = 28, - [0][0][2][0][9][38] = 76, - [0][0][2][0][8][38] = 54, - [0][0][2][0][11][38] = 52, - [0][0][2][0][2][40] = 76, - [0][0][2][0][1][40] = 28, - [0][0][2][0][3][40] = 127, - [0][0][2][0][5][40] = 76, - [0][0][2][0][6][40] = 28, - [0][0][2][0][9][40] = 76, - [0][0][2][0][8][40] = 54, - [0][0][2][0][11][40] = 52, - [0][0][2][0][2][42] = 76, - [0][0][2][0][1][42] = 28, - [0][0][2][0][3][42] = 127, - [0][0][2][0][5][42] = 76, - [0][0][2][0][6][42] = 28, - [0][0][2][0][9][42] = 76, - [0][0][2][0][8][42] = 54, - [0][0][2][0][11][42] = 52, - [0][0][2][0][2][44] = 76, - [0][0][2][0][1][44] = 28, - [0][0][2][0][3][44] = 127, - [0][0][2][0][5][44] = 76, - [0][0][2][0][6][44] = 28, - [0][0][2][0][9][44] = 76, - [0][0][2][0][8][44] = 54, - [0][0][2][0][11][44] = 52, - [0][0][2][0][2][46] = 76, - [0][0][2][0][1][46] = 28, - [0][0][2][0][3][46] = 127, - [0][0][2][0][5][46] = 76, - [0][0][2][0][6][46] = 28, - [0][0][2][0][9][46] = 76, - [0][0][2][0][8][46] = 54, - [0][0][2][0][11][46] = 52, - [0][1][2][0][2][0] = 68, - [0][1][2][0][1][0] = 46, - [0][1][2][0][3][0] = 50, - [0][1][2][0][5][0] = 40, - [0][1][2][0][6][0] = 46, - [0][1][2][0][9][0] = 46, - [0][1][2][0][8][0] = 18, - [0][1][2][0][11][0] = 40, - [0][1][2][0][2][2] = 68, - [0][1][2][0][1][2] = 46, - [0][1][2][0][3][2] = 50, - [0][1][2][0][5][2] = 40, - [0][1][2][0][6][2] = 46, - [0][1][2][0][9][2] = 46, - [0][1][2][0][8][2] = 18, - [0][1][2][0][11][2] = 40, - [0][1][2][0][2][4] = 68, - [0][1][2][0][1][4] = 46, - [0][1][2][0][3][4] = 50, - 
[0][1][2][0][5][4] = 40, - [0][1][2][0][6][4] = 46, - [0][1][2][0][9][4] = 46, - [0][1][2][0][8][4] = 18, - [0][1][2][0][11][4] = 40, - [0][1][2][0][2][6] = 68, - [0][1][2][0][1][6] = 46, - [0][1][2][0][3][6] = 50, - [0][1][2][0][5][6] = 40, - [0][1][2][0][6][6] = 36, - [0][1][2][0][9][6] = 46, - [0][1][2][0][8][6] = 18, - [0][1][2][0][11][6] = 40, - [0][1][2][0][2][8] = 68, - [0][1][2][0][1][8] = 46, - [0][1][2][0][3][8] = 50, - [0][1][2][0][5][8] = 52, - [0][1][2][0][6][8] = 46, - [0][1][2][0][9][8] = 46, - [0][1][2][0][8][8] = 42, - [0][1][2][0][11][8] = 40, - [0][1][2][0][2][10] = 68, - [0][1][2][0][1][10] = 46, - [0][1][2][0][3][10] = 50, - [0][1][2][0][5][10] = 52, - [0][1][2][0][6][10] = 46, - [0][1][2][0][9][10] = 46, - [0][1][2][0][8][10] = 42, - [0][1][2][0][11][10] = 40, - [0][1][2][0][2][12] = 68, - [0][1][2][0][1][12] = 46, - [0][1][2][0][3][12] = 50, - [0][1][2][0][5][12] = 52, - [0][1][2][0][6][12] = 46, - [0][1][2][0][9][12] = 46, - [0][1][2][0][8][12] = 42, - [0][1][2][0][11][12] = 40, - [0][1][2][0][2][14] = 68, - [0][1][2][0][1][14] = 46, - [0][1][2][0][3][14] = 50, - [0][1][2][0][5][14] = 52, - [0][1][2][0][6][14] = 46, - [0][1][2][0][9][14] = 46, - [0][1][2][0][8][14] = 42, - [0][1][2][0][11][14] = 40, - [0][1][2][0][2][15] = 68, - [0][1][2][0][1][15] = 46, - [0][1][2][0][3][15] = 70, - [0][1][2][0][5][15] = 68, - [0][1][2][0][6][15] = 46, - [0][1][2][0][9][15] = 46, - [0][1][2][0][8][15] = 42, - [0][1][2][0][11][15] = 40, - [0][1][2][0][2][17] = 68, - [0][1][2][0][1][17] = 46, - [0][1][2][0][3][17] = 70, - [0][1][2][0][5][17] = 68, - [0][1][2][0][6][17] = 46, - [0][1][2][0][9][17] = 46, - [0][1][2][0][8][17] = 42, - [0][1][2][0][11][17] = 40, - [0][1][2][0][2][19] = 68, - [0][1][2][0][1][19] = 46, - [0][1][2][0][3][19] = 70, - [0][1][2][0][5][19] = 68, - [0][1][2][0][6][19] = 46, - [0][1][2][0][9][19] = 46, - [0][1][2][0][8][19] = 42, - [0][1][2][0][11][19] = 40, - [0][1][2][0][2][21] = 68, - [0][1][2][0][1][21] = 46, - [0][1][2][0][3][21] = 70, - [0][1][2][0][5][21] = 68, - [0][1][2][0][6][21] = 46, - [0][1][2][0][9][21] = 46, - [0][1][2][0][8][21] = 42, - [0][1][2][0][11][21] = 40, - [0][1][2][0][2][23] = 68, - [0][1][2][0][1][23] = 46, - [0][1][2][0][3][23] = 70, - [0][1][2][0][5][23] = 68, - [0][1][2][0][6][23] = 46, - [0][1][2][0][9][23] = 46, - [0][1][2][0][8][23] = 42, - [0][1][2][0][11][23] = 40, - [0][1][2][0][2][25] = 68, - [0][1][2][0][1][25] = 46, - [0][1][2][0][3][25] = 70, - [0][1][2][0][5][25] = 127, - [0][1][2][0][6][25] = 46, - [0][1][2][0][9][25] = 127, - [0][1][2][0][8][25] = 42, - [0][1][2][0][11][25] = 40, - [0][1][2][0][2][27] = 68, - [0][1][2][0][1][27] = 46, - [0][1][2][0][3][27] = 70, - [0][1][2][0][5][27] = 127, - [0][1][2][0][6][27] = 46, - [0][1][2][0][9][27] = 127, - [0][1][2][0][8][27] = 42, - [0][1][2][0][11][27] = 40, - [0][1][2][0][2][29] = 68, - [0][1][2][0][1][29] = 46, - [0][1][2][0][3][29] = 70, - [0][1][2][0][5][29] = 127, - [0][1][2][0][6][29] = 46, - [0][1][2][0][9][29] = 127, - [0][1][2][0][8][29] = 42, - [0][1][2][0][11][29] = 40, - [0][1][2][0][2][31] = 68, - [0][1][2][0][1][31] = 46, - [0][1][2][0][3][31] = 70, - [0][1][2][0][5][31] = 68, - [0][1][2][0][6][31] = 46, - [0][1][2][0][9][31] = 46, - [0][1][2][0][8][31] = 42, - [0][1][2][0][11][31] = 40, - [0][1][2][0][2][33] = 68, - [0][1][2][0][1][33] = 46, - [0][1][2][0][3][33] = 70, - [0][1][2][0][5][33] = 68, - [0][1][2][0][6][33] = 46, - [0][1][2][0][9][33] = 46, - [0][1][2][0][8][33] = 42, - [0][1][2][0][11][33] = 40, - [0][1][2][0][2][35] = 64, - [0][1][2][0][1][35] = 46, 
[rtw89 driver table hunk (per-regulatory-domain, per-channel limit values), flattened by extraction; reconstructed here as a representative excerpt. The removed entries key the fifth array index with bare numbers:]

-	[0][1][2][0][3][35] = 70,
-	[0][1][2][0][5][35] = 64,
-	[0][1][2][0][6][35] = 46,
-	[0][1][2][0][9][35] = 46,
-	[0][1][2][0][8][35] = 42,
-	[0][1][2][0][11][35] = 40,
[... the remaining removed entries follow the same pattern, the fifth index always one of 1, 2, 3, 5, 6, 8, 9, 11, ending at [2][1][2][1][11][41] = 28 ...]

[The added entries re-key that fifth index with named regulatory-domain constants and put new worldwide (RTW89_WW) rows first:]

+	[0][0][1][0][RTW89_WW][0] = 30,
+	[0][0][1][0][RTW89_WW][2] = 30,
+	[0][0][1][0][RTW89_WW][4] = 30,
+	[0][0][1][0][RTW89_WW][6] = 30,
+	[0][0][1][0][RTW89_WW][8] = 52,
[... RTW89_WW rows continue for the remaining index combinations and channel indices up to 46 ...]
+	[0][0][1][0][RTW89_FCC][0] = 76,
+	[0][0][1][0][RTW89_ETSI][0] = 58,
+	[0][0][1][0][RTW89_MKK][0] = 62,
+	[0][0][1][0][RTW89_IC][0] = 62,
+	[0][0][1][0][RTW89_KCC][0] = 76,
+	[0][0][1][0][RTW89_ACMA][0] = 58,
+	[0][0][1][0][RTW89_CHILE][0] = 30,
+	[0][0][1][0][RTW89_UKRAINE][0] = 52,
+	[0][0][1][0][RTW89_MEXICO][0] = 62,
+	[0][0][1][0][RTW89_CN][0] = 58,
+	[0][0][1][0][RTW89_QATAR][0] = 58,
[... per-domain rows continue in this pattern (RTW89_WW, RTW89_FCC, RTW89_ETSI, RTW89_MKK, RTW89_IC, RTW89_KCC, RTW89_ACMA, RTW89_CHILE, RTW89_UKRAINE, RTW89_MEXICO, RTW89_CN, RTW89_QATAR) for each channel index up to 46, with 127 where a domain defines no value; the last entry visible in this section is [0][1][2][1][RTW89_MEXICO][38] = 76, and the hunk continues beyond it ...]
[0][1][2][1][RTW89_CN][38] = 68, + [0][1][2][1][RTW89_QATAR][38] = 4, + [0][1][2][1][RTW89_FCC][40] = 76, + [0][1][2][1][RTW89_ETSI][40] = 4, + [0][1][2][1][RTW89_MKK][40] = 127, + [0][1][2][1][RTW89_IC][40] = 76, + [0][1][2][1][RTW89_KCC][40] = 66, + [0][1][2][1][RTW89_ACMA][40] = 76, + [0][1][2][1][RTW89_CHILE][40] = 30, + [0][1][2][1][RTW89_UKRAINE][40] = 4, + [0][1][2][1][RTW89_MEXICO][40] = 76, + [0][1][2][1][RTW89_CN][40] = 70, + [0][1][2][1][RTW89_QATAR][40] = 4, + [0][1][2][1][RTW89_FCC][42] = 76, + [0][1][2][1][RTW89_ETSI][42] = 4, + [0][1][2][1][RTW89_MKK][42] = 127, + [0][1][2][1][RTW89_IC][42] = 76, + [0][1][2][1][RTW89_KCC][42] = 66, + [0][1][2][1][RTW89_ACMA][42] = 76, + [0][1][2][1][RTW89_CHILE][42] = 30, + [0][1][2][1][RTW89_UKRAINE][42] = 4, + [0][1][2][1][RTW89_MEXICO][42] = 76, + [0][1][2][1][RTW89_CN][42] = 70, + [0][1][2][1][RTW89_QATAR][42] = 4, + [0][1][2][1][RTW89_FCC][44] = 76, + [0][1][2][1][RTW89_ETSI][44] = 4, + [0][1][2][1][RTW89_MKK][44] = 127, + [0][1][2][1][RTW89_IC][44] = 76, + [0][1][2][1][RTW89_KCC][44] = 66, + [0][1][2][1][RTW89_ACMA][44] = 76, + [0][1][2][1][RTW89_CHILE][44] = 30, + [0][1][2][1][RTW89_UKRAINE][44] = 4, + [0][1][2][1][RTW89_MEXICO][44] = 76, + [0][1][2][1][RTW89_CN][44] = 70, + [0][1][2][1][RTW89_QATAR][44] = 4, + [0][1][2][1][RTW89_FCC][46] = 76, + [0][1][2][1][RTW89_ETSI][46] = 4, + [0][1][2][1][RTW89_MKK][46] = 127, + [0][1][2][1][RTW89_IC][46] = 76, + [0][1][2][1][RTW89_KCC][46] = 66, + [0][1][2][1][RTW89_ACMA][46] = 76, + [0][1][2][1][RTW89_CHILE][46] = 30, + [0][1][2][1][RTW89_UKRAINE][46] = 4, + [0][1][2][1][RTW89_MEXICO][46] = 76, + [0][1][2][1][RTW89_CN][46] = 70, + [0][1][2][1][RTW89_QATAR][46] = 4, + [1][0][2][0][RTW89_FCC][1] = 68, + [1][0][2][0][RTW89_ETSI][1] = 64, + [1][0][2][0][RTW89_MKK][1] = 62, + [1][0][2][0][RTW89_IC][1] = 64, + [1][0][2][0][RTW89_KCC][1] = 72, + [1][0][2][0][RTW89_ACMA][1] = 64, + [1][0][2][0][RTW89_CHILE][1] = 30, + [1][0][2][0][RTW89_UKRAINE][1] = 52, + [1][0][2][0][RTW89_MEXICO][1] = 62, + [1][0][2][0][RTW89_CN][1] = 64, + [1][0][2][0][RTW89_QATAR][1] = 64, + [1][0][2][0][RTW89_FCC][5] = 72, + [1][0][2][0][RTW89_ETSI][5] = 64, + [1][0][2][0][RTW89_MKK][5] = 62, + [1][0][2][0][RTW89_IC][5] = 64, + [1][0][2][0][RTW89_KCC][5] = 72, + [1][0][2][0][RTW89_ACMA][5] = 64, + [1][0][2][0][RTW89_CHILE][5] = 30, + [1][0][2][0][RTW89_UKRAINE][5] = 52, + [1][0][2][0][RTW89_MEXICO][5] = 62, + [1][0][2][0][RTW89_CN][5] = 64, + [1][0][2][0][RTW89_QATAR][5] = 64, + [1][0][2][0][RTW89_FCC][9] = 72, + [1][0][2][0][RTW89_ETSI][9] = 64, + [1][0][2][0][RTW89_MKK][9] = 62, + [1][0][2][0][RTW89_IC][9] = 64, + [1][0][2][0][RTW89_KCC][9] = 72, + [1][0][2][0][RTW89_ACMA][9] = 64, + [1][0][2][0][RTW89_CHILE][9] = 54, + [1][0][2][0][RTW89_UKRAINE][9] = 52, + [1][0][2][0][RTW89_MEXICO][9] = 72, + [1][0][2][0][RTW89_CN][9] = 64, + [1][0][2][0][RTW89_QATAR][9] = 64, + [1][0][2][0][RTW89_FCC][13] = 66, + [1][0][2][0][RTW89_ETSI][13] = 64, + [1][0][2][0][RTW89_MKK][13] = 62, + [1][0][2][0][RTW89_IC][13] = 64, + [1][0][2][0][RTW89_KCC][13] = 68, + [1][0][2][0][RTW89_ACMA][13] = 64, + [1][0][2][0][RTW89_CHILE][13] = 54, + [1][0][2][0][RTW89_UKRAINE][13] = 52, + [1][0][2][0][RTW89_MEXICO][13] = 66, + [1][0][2][0][RTW89_CN][13] = 64, + [1][0][2][0][RTW89_QATAR][13] = 64, + [1][0][2][0][RTW89_FCC][16] = 62, + [1][0][2][0][RTW89_ETSI][16] = 64, + [1][0][2][0][RTW89_MKK][16] = 72, + [1][0][2][0][RTW89_IC][16] = 62, + [1][0][2][0][RTW89_KCC][16] = 72, + [1][0][2][0][RTW89_ACMA][16] = 64, + [1][0][2][0][RTW89_CHILE][16] = 54, + 
[1][0][2][0][RTW89_UKRAINE][16] = 52, + [1][0][2][0][RTW89_MEXICO][16] = 62, + [1][0][2][0][RTW89_CN][16] = 127, + [1][0][2][0][RTW89_QATAR][16] = 52, + [1][0][2][0][RTW89_FCC][20] = 72, + [1][0][2][0][RTW89_ETSI][20] = 64, + [1][0][2][0][RTW89_MKK][20] = 72, + [1][0][2][0][RTW89_IC][20] = 72, + [1][0][2][0][RTW89_KCC][20] = 72, + [1][0][2][0][RTW89_ACMA][20] = 64, + [1][0][2][0][RTW89_CHILE][20] = 54, + [1][0][2][0][RTW89_UKRAINE][20] = 52, + [1][0][2][0][RTW89_MEXICO][20] = 72, + [1][0][2][0][RTW89_CN][20] = 127, + [1][0][2][0][RTW89_QATAR][20] = 52, + [1][0][2][0][RTW89_FCC][24] = 72, + [1][0][2][0][RTW89_ETSI][24] = 64, + [1][0][2][0][RTW89_MKK][24] = 72, + [1][0][2][0][RTW89_IC][24] = 127, + [1][0][2][0][RTW89_KCC][24] = 72, + [1][0][2][0][RTW89_ACMA][24] = 127, + [1][0][2][0][RTW89_CHILE][24] = 54, + [1][0][2][0][RTW89_UKRAINE][24] = 52, + [1][0][2][0][RTW89_MEXICO][24] = 72, + [1][0][2][0][RTW89_CN][24] = 127, + [1][0][2][0][RTW89_QATAR][24] = 52, + [1][0][2][0][RTW89_FCC][28] = 72, + [1][0][2][0][RTW89_ETSI][28] = 64, + [1][0][2][0][RTW89_MKK][28] = 72, + [1][0][2][0][RTW89_IC][28] = 127, + [1][0][2][0][RTW89_KCC][28] = 72, + [1][0][2][0][RTW89_ACMA][28] = 127, + [1][0][2][0][RTW89_CHILE][28] = 54, + [1][0][2][0][RTW89_UKRAINE][28] = 52, + [1][0][2][0][RTW89_MEXICO][28] = 72, + [1][0][2][0][RTW89_CN][28] = 127, + [1][0][2][0][RTW89_QATAR][28] = 52, + [1][0][2][0][RTW89_FCC][32] = 72, + [1][0][2][0][RTW89_ETSI][32] = 64, + [1][0][2][0][RTW89_MKK][32] = 72, + [1][0][2][0][RTW89_IC][32] = 72, + [1][0][2][0][RTW89_KCC][32] = 72, + [1][0][2][0][RTW89_ACMA][32] = 64, + [1][0][2][0][RTW89_CHILE][32] = 54, + [1][0][2][0][RTW89_UKRAINE][32] = 52, + [1][0][2][0][RTW89_MEXICO][32] = 72, + [1][0][2][0][RTW89_CN][32] = 127, + [1][0][2][0][RTW89_QATAR][32] = 52, + [1][0][2][0][RTW89_FCC][36] = 72, + [1][0][2][0][RTW89_ETSI][36] = 127, + [1][0][2][0][RTW89_MKK][36] = 72, + [1][0][2][0][RTW89_IC][36] = 72, + [1][0][2][0][RTW89_KCC][36] = 72, + [1][0][2][0][RTW89_ACMA][36] = 72, + [1][0][2][0][RTW89_CHILE][36] = 54, + [1][0][2][0][RTW89_UKRAINE][36] = 127, + [1][0][2][0][RTW89_MEXICO][36] = 72, + [1][0][2][0][RTW89_CN][36] = 127, + [1][0][2][0][RTW89_QATAR][36] = 127, + [1][0][2][0][RTW89_FCC][39] = 72, + [1][0][2][0][RTW89_ETSI][39] = 28, + [1][0][2][0][RTW89_MKK][39] = 127, + [1][0][2][0][RTW89_IC][39] = 72, + [1][0][2][0][RTW89_KCC][39] = 72, + [1][0][2][0][RTW89_ACMA][39] = 72, + [1][0][2][0][RTW89_CHILE][39] = 54, + [1][0][2][0][RTW89_UKRAINE][39] = 28, + [1][0][2][0][RTW89_MEXICO][39] = 72, + [1][0][2][0][RTW89_CN][39] = 68, + [1][0][2][0][RTW89_QATAR][39] = 28, + [1][0][2][0][RTW89_FCC][43] = 72, + [1][0][2][0][RTW89_ETSI][43] = 28, + [1][0][2][0][RTW89_MKK][43] = 127, + [1][0][2][0][RTW89_IC][43] = 72, + [1][0][2][0][RTW89_KCC][43] = 72, + [1][0][2][0][RTW89_ACMA][43] = 72, + [1][0][2][0][RTW89_CHILE][43] = 54, + [1][0][2][0][RTW89_UKRAINE][43] = 28, + [1][0][2][0][RTW89_MEXICO][43] = 72, + [1][0][2][0][RTW89_CN][43] = 72, + [1][0][2][0][RTW89_QATAR][43] = 28, + [1][1][2][0][RTW89_FCC][1] = 58, + [1][1][2][0][RTW89_ETSI][1] = 52, + [1][1][2][0][RTW89_MKK][1] = 50, + [1][1][2][0][RTW89_IC][1] = 52, + [1][1][2][0][RTW89_KCC][1] = 66, + [1][1][2][0][RTW89_ACMA][1] = 52, + [1][1][2][0][RTW89_CHILE][1] = 18, + [1][1][2][0][RTW89_UKRAINE][1] = 40, + [1][1][2][0][RTW89_MEXICO][1] = 50, + [1][1][2][0][RTW89_CN][1] = 52, + [1][1][2][0][RTW89_QATAR][1] = 52, + [1][1][2][0][RTW89_FCC][5] = 72, + [1][1][2][0][RTW89_ETSI][5] = 52, + [1][1][2][0][RTW89_MKK][5] = 50, + [1][1][2][0][RTW89_IC][5] = 52, + 
[1][1][2][0][RTW89_KCC][5] = 50, + [1][1][2][0][RTW89_ACMA][5] = 52, + [1][1][2][0][RTW89_CHILE][5] = 18, + [1][1][2][0][RTW89_UKRAINE][5] = 40, + [1][1][2][0][RTW89_MEXICO][5] = 50, + [1][1][2][0][RTW89_CN][5] = 52, + [1][1][2][0][RTW89_QATAR][5] = 52, + [1][1][2][0][RTW89_FCC][9] = 72, + [1][1][2][0][RTW89_ETSI][9] = 52, + [1][1][2][0][RTW89_MKK][9] = 50, + [1][1][2][0][RTW89_IC][9] = 52, + [1][1][2][0][RTW89_KCC][9] = 66, + [1][1][2][0][RTW89_ACMA][9] = 52, + [1][1][2][0][RTW89_CHILE][9] = 42, + [1][1][2][0][RTW89_UKRAINE][9] = 40, + [1][1][2][0][RTW89_MEXICO][9] = 72, + [1][1][2][0][RTW89_CN][9] = 52, + [1][1][2][0][RTW89_QATAR][9] = 52, + [1][1][2][0][RTW89_FCC][13] = 58, + [1][1][2][0][RTW89_ETSI][13] = 52, + [1][1][2][0][RTW89_MKK][13] = 50, + [1][1][2][0][RTW89_IC][13] = 52, + [1][1][2][0][RTW89_KCC][13] = 66, + [1][1][2][0][RTW89_ACMA][13] = 52, + [1][1][2][0][RTW89_CHILE][13] = 42, + [1][1][2][0][RTW89_UKRAINE][13] = 40, + [1][1][2][0][RTW89_MEXICO][13] = 58, + [1][1][2][0][RTW89_CN][13] = 52, + [1][1][2][0][RTW89_QATAR][13] = 52, + [1][1][2][0][RTW89_FCC][16] = 56, + [1][1][2][0][RTW89_ETSI][16] = 52, + [1][1][2][0][RTW89_MKK][16] = 72, + [1][1][2][0][RTW89_IC][16] = 56, + [1][1][2][0][RTW89_KCC][16] = 64, + [1][1][2][0][RTW89_ACMA][16] = 52, + [1][1][2][0][RTW89_CHILE][16] = 42, + [1][1][2][0][RTW89_UKRAINE][16] = 40, + [1][1][2][0][RTW89_MEXICO][16] = 56, + [1][1][2][0][RTW89_CN][16] = 127, + [1][1][2][0][RTW89_QATAR][16] = 40, + [1][1][2][0][RTW89_FCC][20] = 72, + [1][1][2][0][RTW89_ETSI][20] = 52, + [1][1][2][0][RTW89_MKK][20] = 72, + [1][1][2][0][RTW89_IC][20] = 72, + [1][1][2][0][RTW89_KCC][20] = 66, + [1][1][2][0][RTW89_ACMA][20] = 52, + [1][1][2][0][RTW89_CHILE][20] = 42, + [1][1][2][0][RTW89_UKRAINE][20] = 40, + [1][1][2][0][RTW89_MEXICO][20] = 72, + [1][1][2][0][RTW89_CN][20] = 127, + [1][1][2][0][RTW89_QATAR][20] = 40, + [1][1][2][0][RTW89_FCC][24] = 72, + [1][1][2][0][RTW89_ETSI][24] = 52, + [1][1][2][0][RTW89_MKK][24] = 72, + [1][1][2][0][RTW89_IC][24] = 127, + [1][1][2][0][RTW89_KCC][24] = 66, + [1][1][2][0][RTW89_ACMA][24] = 127, + [1][1][2][0][RTW89_CHILE][24] = 42, + [1][1][2][0][RTW89_UKRAINE][24] = 40, + [1][1][2][0][RTW89_MEXICO][24] = 72, + [1][1][2][0][RTW89_CN][24] = 127, + [1][1][2][0][RTW89_QATAR][24] = 40, + [1][1][2][0][RTW89_FCC][28] = 72, + [1][1][2][0][RTW89_ETSI][28] = 52, + [1][1][2][0][RTW89_MKK][28] = 72, + [1][1][2][0][RTW89_IC][28] = 127, + [1][1][2][0][RTW89_KCC][28] = 66, + [1][1][2][0][RTW89_ACMA][28] = 127, + [1][1][2][0][RTW89_CHILE][28] = 42, + [1][1][2][0][RTW89_UKRAINE][28] = 40, + [1][1][2][0][RTW89_MEXICO][28] = 72, + [1][1][2][0][RTW89_CN][28] = 127, + [1][1][2][0][RTW89_QATAR][28] = 40, + [1][1][2][0][RTW89_FCC][32] = 68, + [1][1][2][0][RTW89_ETSI][32] = 52, + [1][1][2][0][RTW89_MKK][32] = 72, + [1][1][2][0][RTW89_IC][32] = 68, + [1][1][2][0][RTW89_KCC][32] = 66, + [1][1][2][0][RTW89_ACMA][32] = 52, + [1][1][2][0][RTW89_CHILE][32] = 42, + [1][1][2][0][RTW89_UKRAINE][32] = 40, + [1][1][2][0][RTW89_MEXICO][32] = 68, + [1][1][2][0][RTW89_CN][32] = 127, + [1][1][2][0][RTW89_QATAR][32] = 40, + [1][1][2][0][RTW89_FCC][36] = 72, + [1][1][2][0][RTW89_ETSI][36] = 127, + [1][1][2][0][RTW89_MKK][36] = 72, + [1][1][2][0][RTW89_IC][36] = 72, + [1][1][2][0][RTW89_KCC][36] = 66, + [1][1][2][0][RTW89_ACMA][36] = 72, + [1][1][2][0][RTW89_CHILE][36] = 42, + [1][1][2][0][RTW89_UKRAINE][36] = 127, + [1][1][2][0][RTW89_MEXICO][36] = 72, + [1][1][2][0][RTW89_CN][36] = 127, + [1][1][2][0][RTW89_QATAR][36] = 127, + [1][1][2][0][RTW89_FCC][39] = 72, + 
[1][1][2][0][RTW89_ETSI][39] = 16, + [1][1][2][0][RTW89_MKK][39] = 127, + [1][1][2][0][RTW89_IC][39] = 72, + [1][1][2][0][RTW89_KCC][39] = 66, + [1][1][2][0][RTW89_ACMA][39] = 72, + [1][1][2][0][RTW89_CHILE][39] = 42, + [1][1][2][0][RTW89_UKRAINE][39] = 16, + [1][1][2][0][RTW89_MEXICO][39] = 72, + [1][1][2][0][RTW89_CN][39] = 68, + [1][1][2][0][RTW89_QATAR][39] = 16, + [1][1][2][0][RTW89_FCC][43] = 72, + [1][1][2][0][RTW89_ETSI][43] = 16, + [1][1][2][0][RTW89_MKK][43] = 127, + [1][1][2][0][RTW89_IC][43] = 72, + [1][1][2][0][RTW89_KCC][43] = 66, + [1][1][2][0][RTW89_ACMA][43] = 72, + [1][1][2][0][RTW89_CHILE][43] = 42, + [1][1][2][0][RTW89_UKRAINE][43] = 16, + [1][1][2][0][RTW89_MEXICO][43] = 72, + [1][1][2][0][RTW89_CN][43] = 72, + [1][1][2][0][RTW89_QATAR][43] = 16, + [1][1][2][1][RTW89_FCC][1] = 58, + [1][1][2][1][RTW89_ETSI][1] = 40, + [1][1][2][1][RTW89_MKK][1] = 50, + [1][1][2][1][RTW89_IC][1] = 40, + [1][1][2][1][RTW89_KCC][1] = 66, + [1][1][2][1][RTW89_ACMA][1] = 40, + [1][1][2][1][RTW89_CHILE][1] = 6, + [1][1][2][1][RTW89_UKRAINE][1] = 28, + [1][1][2][1][RTW89_MEXICO][1] = 50, + [1][1][2][1][RTW89_CN][1] = 40, + [1][1][2][1][RTW89_QATAR][1] = 40, + [1][1][2][1][RTW89_FCC][5] = 68, + [1][1][2][1][RTW89_ETSI][5] = 40, + [1][1][2][1][RTW89_MKK][5] = 50, + [1][1][2][1][RTW89_IC][5] = 40, + [1][1][2][1][RTW89_KCC][5] = 50, + [1][1][2][1][RTW89_ACMA][5] = 40, + [1][1][2][1][RTW89_CHILE][5] = 6, + [1][1][2][1][RTW89_UKRAINE][5] = 28, + [1][1][2][1][RTW89_MEXICO][5] = 50, + [1][1][2][1][RTW89_CN][5] = 40, + [1][1][2][1][RTW89_QATAR][5] = 40, + [1][1][2][1][RTW89_FCC][9] = 68, + [1][1][2][1][RTW89_ETSI][9] = 40, + [1][1][2][1][RTW89_MKK][9] = 50, + [1][1][2][1][RTW89_IC][9] = 40, + [1][1][2][1][RTW89_KCC][9] = 66, + [1][1][2][1][RTW89_ACMA][9] = 40, + [1][1][2][1][RTW89_CHILE][9] = 30, + [1][1][2][1][RTW89_UKRAINE][9] = 28, + [1][1][2][1][RTW89_MEXICO][9] = 68, + [1][1][2][1][RTW89_CN][9] = 40, + [1][1][2][1][RTW89_QATAR][9] = 40, + [1][1][2][1][RTW89_FCC][13] = 58, + [1][1][2][1][RTW89_ETSI][13] = 40, + [1][1][2][1][RTW89_MKK][13] = 50, + [1][1][2][1][RTW89_IC][13] = 40, + [1][1][2][1][RTW89_KCC][13] = 66, + [1][1][2][1][RTW89_ACMA][13] = 40, + [1][1][2][1][RTW89_CHILE][13] = 30, + [1][1][2][1][RTW89_UKRAINE][13] = 28, + [1][1][2][1][RTW89_MEXICO][13] = 58, + [1][1][2][1][RTW89_CN][13] = 40, + [1][1][2][1][RTW89_QATAR][13] = 40, + [1][1][2][1][RTW89_FCC][16] = 56, + [1][1][2][1][RTW89_ETSI][16] = 40, + [1][1][2][1][RTW89_MKK][16] = 72, + [1][1][2][1][RTW89_IC][16] = 56, + [1][1][2][1][RTW89_KCC][16] = 64, + [1][1][2][1][RTW89_ACMA][16] = 40, + [1][1][2][1][RTW89_CHILE][16] = 30, + [1][1][2][1][RTW89_UKRAINE][16] = 28, + [1][1][2][1][RTW89_MEXICO][16] = 56, + [1][1][2][1][RTW89_CN][16] = 127, + [1][1][2][1][RTW89_QATAR][16] = 28, + [1][1][2][1][RTW89_FCC][20] = 68, + [1][1][2][1][RTW89_ETSI][20] = 40, + [1][1][2][1][RTW89_MKK][20] = 72, + [1][1][2][1][RTW89_IC][20] = 68, + [1][1][2][1][RTW89_KCC][20] = 66, + [1][1][2][1][RTW89_ACMA][20] = 40, + [1][1][2][1][RTW89_CHILE][20] = 30, + [1][1][2][1][RTW89_UKRAINE][20] = 28, + [1][1][2][1][RTW89_MEXICO][20] = 68, + [1][1][2][1][RTW89_CN][20] = 127, + [1][1][2][1][RTW89_QATAR][20] = 28, + [1][1][2][1][RTW89_FCC][24] = 68, + [1][1][2][1][RTW89_ETSI][24] = 40, + [1][1][2][1][RTW89_MKK][24] = 72, + [1][1][2][1][RTW89_IC][24] = 127, + [1][1][2][1][RTW89_KCC][24] = 66, + [1][1][2][1][RTW89_ACMA][24] = 127, + [1][1][2][1][RTW89_CHILE][24] = 30, + [1][1][2][1][RTW89_UKRAINE][24] = 28, + [1][1][2][1][RTW89_MEXICO][24] = 68, + [1][1][2][1][RTW89_CN][24] = 
127, + [1][1][2][1][RTW89_QATAR][24] = 28, + [1][1][2][1][RTW89_FCC][28] = 68, + [1][1][2][1][RTW89_ETSI][28] = 40, + [1][1][2][1][RTW89_MKK][28] = 72, + [1][1][2][1][RTW89_IC][28] = 127, + [1][1][2][1][RTW89_KCC][28] = 66, + [1][1][2][1][RTW89_ACMA][28] = 127, + [1][1][2][1][RTW89_CHILE][28] = 30, + [1][1][2][1][RTW89_UKRAINE][28] = 28, + [1][1][2][1][RTW89_MEXICO][28] = 68, + [1][1][2][1][RTW89_CN][28] = 127, + [1][1][2][1][RTW89_QATAR][28] = 28, + [1][1][2][1][RTW89_FCC][32] = 68, + [1][1][2][1][RTW89_ETSI][32] = 40, + [1][1][2][1][RTW89_MKK][32] = 72, + [1][1][2][1][RTW89_IC][32] = 68, + [1][1][2][1][RTW89_KCC][32] = 66, + [1][1][2][1][RTW89_ACMA][32] = 40, + [1][1][2][1][RTW89_CHILE][32] = 30, + [1][1][2][1][RTW89_UKRAINE][32] = 28, + [1][1][2][1][RTW89_MEXICO][32] = 68, + [1][1][2][1][RTW89_CN][32] = 127, + [1][1][2][1][RTW89_QATAR][32] = 28, + [1][1][2][1][RTW89_FCC][36] = 68, + [1][1][2][1][RTW89_ETSI][36] = 127, + [1][1][2][1][RTW89_MKK][36] = 72, + [1][1][2][1][RTW89_IC][36] = 68, + [1][1][2][1][RTW89_KCC][36] = 66, + [1][1][2][1][RTW89_ACMA][36] = 68, + [1][1][2][1][RTW89_CHILE][36] = 30, + [1][1][2][1][RTW89_UKRAINE][36] = 127, + [1][1][2][1][RTW89_MEXICO][36] = 68, + [1][1][2][1][RTW89_CN][36] = 127, + [1][1][2][1][RTW89_QATAR][36] = 127, + [1][1][2][1][RTW89_FCC][39] = 72, + [1][1][2][1][RTW89_ETSI][39] = 4, + [1][1][2][1][RTW89_MKK][39] = 127, + [1][1][2][1][RTW89_IC][39] = 72, + [1][1][2][1][RTW89_KCC][39] = 66, + [1][1][2][1][RTW89_ACMA][39] = 72, + [1][1][2][1][RTW89_CHILE][39] = 30, + [1][1][2][1][RTW89_UKRAINE][39] = 4, + [1][1][2][1][RTW89_MEXICO][39] = 72, + [1][1][2][1][RTW89_CN][39] = 62, + [1][1][2][1][RTW89_QATAR][39] = 4, + [1][1][2][1][RTW89_FCC][43] = 72, + [1][1][2][1][RTW89_ETSI][43] = 4, + [1][1][2][1][RTW89_MKK][43] = 127, + [1][1][2][1][RTW89_IC][43] = 72, + [1][1][2][1][RTW89_KCC][43] = 66, + [1][1][2][1][RTW89_ACMA][43] = 72, + [1][1][2][1][RTW89_CHILE][43] = 30, + [1][1][2][1][RTW89_UKRAINE][43] = 4, + [1][1][2][1][RTW89_MEXICO][43] = 72, + [1][1][2][1][RTW89_CN][43] = 72, + [1][1][2][1][RTW89_QATAR][43] = 4, + [2][0][2][0][RTW89_FCC][3] = 64, + [2][0][2][0][RTW89_ETSI][3] = 64, + [2][0][2][0][RTW89_MKK][3] = 64, + [2][0][2][0][RTW89_IC][3] = 62, + [2][0][2][0][RTW89_KCC][3] = 72, + [2][0][2][0][RTW89_ACMA][3] = 64, + [2][0][2][0][RTW89_CHILE][3] = 30, + [2][0][2][0][RTW89_UKRAINE][3] = 52, + [2][0][2][0][RTW89_MEXICO][3] = 62, + [2][0][2][0][RTW89_CN][3] = 64, + [2][0][2][0][RTW89_QATAR][3] = 64, + [2][0][2][0][RTW89_FCC][11] = 64, + [2][0][2][0][RTW89_ETSI][11] = 64, + [2][0][2][0][RTW89_MKK][11] = 64, + [2][0][2][0][RTW89_IC][11] = 62, + [2][0][2][0][RTW89_KCC][11] = 72, + [2][0][2][0][RTW89_ACMA][11] = 64, + [2][0][2][0][RTW89_CHILE][11] = 54, + [2][0][2][0][RTW89_UKRAINE][11] = 52, + [2][0][2][0][RTW89_MEXICO][11] = 64, + [2][0][2][0][RTW89_CN][11] = 64, + [2][0][2][0][RTW89_QATAR][11] = 64, + [2][0][2][0][RTW89_FCC][18] = 62, + [2][0][2][0][RTW89_ETSI][18] = 64, + [2][0][2][0][RTW89_MKK][18] = 72, + [2][0][2][0][RTW89_IC][18] = 66, + [2][0][2][0][RTW89_KCC][18] = 70, + [2][0][2][0][RTW89_ACMA][18] = 64, + [2][0][2][0][RTW89_CHILE][18] = 54, + [2][0][2][0][RTW89_UKRAINE][18] = 52, + [2][0][2][0][RTW89_MEXICO][18] = 62, + [2][0][2][0][RTW89_CN][18] = 127, + [2][0][2][0][RTW89_QATAR][18] = 52, + [2][0][2][0][RTW89_FCC][26] = 72, + [2][0][2][0][RTW89_ETSI][26] = 64, + [2][0][2][0][RTW89_MKK][26] = 72, + [2][0][2][0][RTW89_IC][26] = 127, + [2][0][2][0][RTW89_KCC][26] = 72, + [2][0][2][0][RTW89_ACMA][26] = 127, + [2][0][2][0][RTW89_CHILE][26] = 54, + 
[2][0][2][0][RTW89_UKRAINE][26] = 52, + [2][0][2][0][RTW89_MEXICO][26] = 72, + [2][0][2][0][RTW89_CN][26] = 127, + [2][0][2][0][RTW89_QATAR][26] = 52, + [2][0][2][0][RTW89_FCC][34] = 72, + [2][0][2][0][RTW89_ETSI][34] = 127, + [2][0][2][0][RTW89_MKK][34] = 72, + [2][0][2][0][RTW89_IC][34] = 72, + [2][0][2][0][RTW89_KCC][34] = 72, + [2][0][2][0][RTW89_ACMA][34] = 72, + [2][0][2][0][RTW89_CHILE][34] = 54, + [2][0][2][0][RTW89_UKRAINE][34] = 127, + [2][0][2][0][RTW89_MEXICO][34] = 72, + [2][0][2][0][RTW89_CN][34] = 127, + [2][0][2][0][RTW89_QATAR][34] = 127, + [2][0][2][0][RTW89_FCC][41] = 72, + [2][0][2][0][RTW89_ETSI][41] = 28, + [2][0][2][0][RTW89_MKK][41] = 127, + [2][0][2][0][RTW89_IC][41] = 72, + [2][0][2][0][RTW89_KCC][41] = 68, + [2][0][2][0][RTW89_ACMA][41] = 72, + [2][0][2][0][RTW89_CHILE][41] = 54, + [2][0][2][0][RTW89_UKRAINE][41] = 28, + [2][0][2][0][RTW89_MEXICO][41] = 72, + [2][0][2][0][RTW89_CN][41] = 68, + [2][0][2][0][RTW89_QATAR][41] = 28, + [2][1][2][0][RTW89_FCC][3] = 56, + [2][1][2][0][RTW89_ETSI][3] = 52, + [2][1][2][0][RTW89_MKK][3] = 52, + [2][1][2][0][RTW89_IC][3] = 52, + [2][1][2][0][RTW89_KCC][3] = 66, + [2][1][2][0][RTW89_ACMA][3] = 52, + [2][1][2][0][RTW89_CHILE][3] = 18, + [2][1][2][0][RTW89_UKRAINE][3] = 40, + [2][1][2][0][RTW89_MEXICO][3] = 50, + [2][1][2][0][RTW89_CN][3] = 52, + [2][1][2][0][RTW89_QATAR][3] = 52, + [2][1][2][0][RTW89_FCC][11] = 56, + [2][1][2][0][RTW89_ETSI][11] = 52, + [2][1][2][0][RTW89_MKK][11] = 52, + [2][1][2][0][RTW89_IC][11] = 52, + [2][1][2][0][RTW89_KCC][11] = 64, + [2][1][2][0][RTW89_ACMA][11] = 52, + [2][1][2][0][RTW89_CHILE][11] = 42, + [2][1][2][0][RTW89_UKRAINE][11] = 40, + [2][1][2][0][RTW89_MEXICO][11] = 56, + [2][1][2][0][RTW89_CN][11] = 52, + [2][1][2][0][RTW89_QATAR][11] = 52, + [2][1][2][0][RTW89_FCC][18] = 56, + [2][1][2][0][RTW89_ETSI][18] = 52, + [2][1][2][0][RTW89_MKK][18] = 72, + [2][1][2][0][RTW89_IC][18] = 56, + [2][1][2][0][RTW89_KCC][18] = 58, + [2][1][2][0][RTW89_ACMA][18] = 52, + [2][1][2][0][RTW89_CHILE][18] = 42, + [2][1][2][0][RTW89_UKRAINE][18] = 40, + [2][1][2][0][RTW89_MEXICO][18] = 56, + [2][1][2][0][RTW89_CN][18] = 127, + [2][1][2][0][RTW89_QATAR][18] = 40, + [2][1][2][0][RTW89_FCC][26] = 72, + [2][1][2][0][RTW89_ETSI][26] = 52, + [2][1][2][0][RTW89_MKK][26] = 72, + [2][1][2][0][RTW89_IC][26] = 127, + [2][1][2][0][RTW89_KCC][26] = 64, + [2][1][2][0][RTW89_ACMA][26] = 127, + [2][1][2][0][RTW89_CHILE][26] = 42, + [2][1][2][0][RTW89_UKRAINE][26] = 40, + [2][1][2][0][RTW89_MEXICO][26] = 72, + [2][1][2][0][RTW89_CN][26] = 127, + [2][1][2][0][RTW89_QATAR][26] = 40, + [2][1][2][0][RTW89_FCC][34] = 72, + [2][1][2][0][RTW89_ETSI][34] = 127, + [2][1][2][0][RTW89_MKK][34] = 72, + [2][1][2][0][RTW89_IC][34] = 72, + [2][1][2][0][RTW89_KCC][34] = 64, + [2][1][2][0][RTW89_ACMA][34] = 72, + [2][1][2][0][RTW89_CHILE][34] = 42, + [2][1][2][0][RTW89_UKRAINE][34] = 127, + [2][1][2][0][RTW89_MEXICO][34] = 72, + [2][1][2][0][RTW89_CN][34] = 127, + [2][1][2][0][RTW89_QATAR][34] = 127, + [2][1][2][0][RTW89_FCC][41] = 72, + [2][1][2][0][RTW89_ETSI][41] = 16, + [2][1][2][0][RTW89_MKK][41] = 127, + [2][1][2][0][RTW89_IC][41] = 72, + [2][1][2][0][RTW89_KCC][41] = 58, + [2][1][2][0][RTW89_ACMA][41] = 72, + [2][1][2][0][RTW89_CHILE][41] = 42, + [2][1][2][0][RTW89_UKRAINE][41] = 16, + [2][1][2][0][RTW89_MEXICO][41] = 72, + [2][1][2][0][RTW89_CN][41] = 68, + [2][1][2][0][RTW89_QATAR][41] = 16, + [2][1][2][1][RTW89_FCC][3] = 56, + [2][1][2][1][RTW89_ETSI][3] = 40, + [2][1][2][1][RTW89_MKK][3] = 52, + [2][1][2][1][RTW89_IC][3] = 40, + 
[2][1][2][1][RTW89_KCC][3] = 66, + [2][1][2][1][RTW89_ACMA][3] = 40, + [2][1][2][1][RTW89_CHILE][3] = 6, + [2][1][2][1][RTW89_UKRAINE][3] = 28, + [2][1][2][1][RTW89_MEXICO][3] = 50, + [2][1][2][1][RTW89_CN][3] = 40, + [2][1][2][1][RTW89_QATAR][3] = 40, + [2][1][2][1][RTW89_FCC][11] = 56, + [2][1][2][1][RTW89_ETSI][11] = 40, + [2][1][2][1][RTW89_MKK][11] = 52, + [2][1][2][1][RTW89_IC][11] = 40, + [2][1][2][1][RTW89_KCC][11] = 64, + [2][1][2][1][RTW89_ACMA][11] = 40, + [2][1][2][1][RTW89_CHILE][11] = 30, + [2][1][2][1][RTW89_UKRAINE][11] = 28, + [2][1][2][1][RTW89_MEXICO][11] = 56, + [2][1][2][1][RTW89_CN][11] = 40, + [2][1][2][1][RTW89_QATAR][11] = 40, + [2][1][2][1][RTW89_FCC][18] = 56, + [2][1][2][1][RTW89_ETSI][18] = 40, + [2][1][2][1][RTW89_MKK][18] = 72, + [2][1][2][1][RTW89_IC][18] = 56, + [2][1][2][1][RTW89_KCC][18] = 58, + [2][1][2][1][RTW89_ACMA][18] = 40, + [2][1][2][1][RTW89_CHILE][18] = 30, + [2][1][2][1][RTW89_UKRAINE][18] = 28, + [2][1][2][1][RTW89_MEXICO][18] = 56, + [2][1][2][1][RTW89_CN][18] = 127, + [2][1][2][1][RTW89_QATAR][18] = 28, + [2][1][2][1][RTW89_FCC][26] = 68, + [2][1][2][1][RTW89_ETSI][26] = 40, + [2][1][2][1][RTW89_MKK][26] = 72, + [2][1][2][1][RTW89_IC][26] = 127, + [2][1][2][1][RTW89_KCC][26] = 64, + [2][1][2][1][RTW89_ACMA][26] = 127, + [2][1][2][1][RTW89_CHILE][26] = 30, + [2][1][2][1][RTW89_UKRAINE][26] = 28, + [2][1][2][1][RTW89_MEXICO][26] = 68, + [2][1][2][1][RTW89_CN][26] = 127, + [2][1][2][1][RTW89_QATAR][26] = 28, + [2][1][2][1][RTW89_FCC][34] = 68, + [2][1][2][1][RTW89_ETSI][34] = 127, + [2][1][2][1][RTW89_MKK][34] = 72, + [2][1][2][1][RTW89_IC][34] = 68, + [2][1][2][1][RTW89_KCC][34] = 64, + [2][1][2][1][RTW89_ACMA][34] = 68, + [2][1][2][1][RTW89_CHILE][34] = 30, + [2][1][2][1][RTW89_UKRAINE][34] = 127, + [2][1][2][1][RTW89_MEXICO][34] = 68, + [2][1][2][1][RTW89_CN][34] = 127, + [2][1][2][1][RTW89_QATAR][34] = 127, + [2][1][2][1][RTW89_FCC][41] = 72, + [2][1][2][1][RTW89_ETSI][41] = 4, + [2][1][2][1][RTW89_MKK][41] = 127, + [2][1][2][1][RTW89_IC][41] = 72, + [2][1][2][1][RTW89_KCC][41] = 58, + [2][1][2][1][RTW89_ACMA][41] = 72, + [2][1][2][1][RTW89_CHILE][41] = 30, + [2][1][2][1][RTW89_UKRAINE][41] = 4, + [2][1][2][1][RTW89_MEXICO][41] = 72, + [2][1][2][1][RTW89_CN][41] = 64, + [2][1][2][1][RTW89_QATAR][41] = 4, }; const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0] = 32, - [0][0][0][1] = 32, - [0][0][0][2] = 32, - [0][0][0][3] = 32, - [0][0][0][4] = 32, - [0][0][0][5] = 32, - [0][0][0][6] = 32, - [0][0][0][7] = 32, - [0][0][0][8] = 32, - [0][0][0][9] = 32, - [0][0][0][10] = 32, - [0][0][0][11] = 32, - [0][0][0][12] = 32, - [0][0][0][13] = 0, - [0][1][0][0] = 20, - [0][1][0][1] = 20, - [0][1][0][2] = 20, - [0][1][0][3] = 20, - [0][1][0][4] = 20, - [0][1][0][5] = 20, - [0][1][0][6] = 20, - [0][1][0][7] = 20, - [0][1][0][8] = 20, - [0][1][0][9] = 20, - [0][1][0][10] = 20, - [0][1][0][11] = 20, - [0][1][0][12] = 20, - [0][1][0][13] = 0, - [1][0][0][0] = 42, - [1][0][0][1] = 42, - [1][0][0][2] = 42, - [1][0][0][3] = 42, - [1][0][0][4] = 42, - [1][0][0][5] = 42, - [1][0][0][6] = 42, - [1][0][0][7] = 42, - [1][0][0][8] = 42, - [1][0][0][9] = 42, - [1][0][0][10] = 42, - [1][0][0][11] = 42, - [1][0][0][12] = 36, - [1][0][0][13] = 0, - [1][1][0][0] = 30, - [1][1][0][1] = 30, - [1][1][0][2] = 30, - [1][1][0][3] = 30, - [1][1][0][4] = 30, - [1][1][0][5] = 30, - [1][1][0][6] = 30, - [1][1][0][7] = 30, - [1][1][0][8] = 30, - [1][1][0][9] = 30, - [1][1][0][10] = 30, - [1][1][0][11] = 30, - 
[1][1][0][12] = 30, - [1][1][0][13] = 0, - [2][0][0][0] = 52, - [2][0][0][1] = 52, - [2][0][0][2] = 52, - [2][0][0][3] = 52, - [2][0][0][4] = 52, - [2][0][0][5] = 52, - [2][0][0][6] = 52, - [2][0][0][7] = 52, - [2][0][0][8] = 52, - [2][0][0][9] = 52, - [2][0][0][10] = 52, - [2][0][0][11] = 52, - [2][0][0][12] = 40, - [2][0][0][13] = 0, - [2][1][0][0] = 40, - [2][1][0][1] = 40, - [2][1][0][2] = 40, - [2][1][0][3] = 40, - [2][1][0][4] = 40, - [2][1][0][5] = 40, - [2][1][0][6] = 40, - [2][1][0][7] = 40, - [2][1][0][8] = 40, - [2][1][0][9] = 40, - [2][1][0][10] = 40, - [2][1][0][11] = 40, - [2][1][0][12] = 26, - [2][1][0][13] = 0, - [0][0][2][0] = 70, - [0][0][1][0] = 32, - [0][0][3][0] = 40, - [0][0][5][0] = 70, - [0][0][6][0] = 32, - [0][0][9][0] = 32, - [0][0][8][0] = 60, - [0][0][11][0] = 32, - [0][0][2][1] = 70, - [0][0][1][1] = 32, - [0][0][3][1] = 40, - [0][0][5][1] = 70, - [0][0][6][1] = 32, - [0][0][9][1] = 32, - [0][0][8][1] = 60, - [0][0][11][1] = 32, - [0][0][2][2] = 74, - [0][0][1][2] = 32, - [0][0][3][2] = 40, - [0][0][5][2] = 74, - [0][0][6][2] = 32, - [0][0][9][2] = 32, - [0][0][8][2] = 60, - [0][0][11][2] = 32, - [0][0][2][3] = 78, - [0][0][1][3] = 32, - [0][0][3][3] = 40, - [0][0][5][3] = 78, - [0][0][6][3] = 32, - [0][0][9][3] = 32, - [0][0][8][3] = 60, - [0][0][11][3] = 32, - [0][0][2][4] = 78, - [0][0][1][4] = 32, - [0][0][3][4] = 40, - [0][0][5][4] = 78, - [0][0][6][4] = 32, - [0][0][9][4] = 32, - [0][0][8][4] = 60, - [0][0][11][4] = 32, - [0][0][2][5] = 78, - [0][0][1][5] = 32, - [0][0][3][5] = 40, - [0][0][5][5] = 78, - [0][0][6][5] = 32, - [0][0][9][5] = 32, - [0][0][8][5] = 60, - [0][0][11][5] = 32, - [0][0][2][6] = 78, - [0][0][1][6] = 32, - [0][0][3][6] = 40, - [0][0][5][6] = 78, - [0][0][6][6] = 32, - [0][0][9][6] = 32, - [0][0][8][6] = 60, - [0][0][11][6] = 32, - [0][0][2][7] = 78, - [0][0][1][7] = 32, - [0][0][3][7] = 40, - [0][0][5][7] = 78, - [0][0][6][7] = 32, - [0][0][9][7] = 32, - [0][0][8][7] = 60, - [0][0][11][7] = 32, - [0][0][2][8] = 74, - [0][0][1][8] = 32, - [0][0][3][8] = 40, - [0][0][5][8] = 74, - [0][0][6][8] = 32, - [0][0][9][8] = 32, - [0][0][8][8] = 60, - [0][0][11][8] = 32, - [0][0][2][9] = 70, - [0][0][1][9] = 32, - [0][0][3][9] = 40, - [0][0][5][9] = 70, - [0][0][6][9] = 32, - [0][0][9][9] = 32, - [0][0][8][9] = 60, - [0][0][11][9] = 32, - [0][0][2][10] = 70, - [0][0][1][10] = 32, - [0][0][3][10] = 40, - [0][0][5][10] = 70, - [0][0][6][10] = 32, - [0][0][9][10] = 32, - [0][0][8][10] = 60, - [0][0][11][10] = 32, - [0][0][2][11] = 58, - [0][0][1][11] = 32, - [0][0][3][11] = 40, - [0][0][5][11] = 58, - [0][0][6][11] = 32, - [0][0][9][11] = 32, - [0][0][8][11] = 60, - [0][0][11][11] = 32, - [0][0][2][12] = 34, - [0][0][1][12] = 32, - [0][0][3][12] = 40, - [0][0][5][12] = 34, - [0][0][6][12] = 32, - [0][0][9][12] = 32, - [0][0][8][12] = 60, - [0][0][11][12] = 32, - [0][0][2][13] = 127, - [0][0][1][13] = 127, - [0][0][3][13] = 127, - [0][0][5][13] = 127, - [0][0][6][13] = 127, - [0][0][9][13] = 127, - [0][0][8][13] = 127, - [0][0][11][13] = 127, - [0][1][2][0] = 64, - [0][1][1][0] = 20, - [0][1][3][0] = 28, - [0][1][5][0] = 64, - [0][1][6][0] = 20, - [0][1][9][0] = 20, - [0][1][8][0] = 48, - [0][1][11][0] = 20, - [0][1][2][1] = 64, - [0][1][1][1] = 20, - [0][1][3][1] = 28, - [0][1][5][1] = 64, - [0][1][6][1] = 20, - [0][1][9][1] = 20, - [0][1][8][1] = 48, - [0][1][11][1] = 20, - [0][1][2][2] = 68, - [0][1][1][2] = 20, - [0][1][3][2] = 28, - [0][1][5][2] = 68, - [0][1][6][2] = 20, - [0][1][9][2] = 20, - [0][1][8][2] = 48, - [0][1][11][2] = 20, - 
[0][1][2][3] = 72, - [0][1][1][3] = 20, - [0][1][3][3] = 28, - [0][1][5][3] = 72, - [0][1][6][3] = 20, - [0][1][9][3] = 20, - [0][1][8][3] = 48, - [0][1][11][3] = 20, - [0][1][2][4] = 76, - [0][1][1][4] = 20, - [0][1][3][4] = 28, - [0][1][5][4] = 76, - [0][1][6][4] = 20, - [0][1][9][4] = 20, - [0][1][8][4] = 48, - [0][1][11][4] = 20, - [0][1][2][5] = 78, - [0][1][1][5] = 20, - [0][1][3][5] = 28, - [0][1][5][5] = 78, - [0][1][6][5] = 20, - [0][1][9][5] = 20, - [0][1][8][5] = 48, - [0][1][11][5] = 20, - [0][1][2][6] = 76, - [0][1][1][6] = 20, - [0][1][3][6] = 28, - [0][1][5][6] = 76, - [0][1][6][6] = 20, - [0][1][9][6] = 20, - [0][1][8][6] = 48, - [0][1][11][6] = 20, - [0][1][2][7] = 72, - [0][1][1][7] = 20, - [0][1][3][7] = 28, - [0][1][5][7] = 72, - [0][1][6][7] = 20, - [0][1][9][7] = 20, - [0][1][8][7] = 48, - [0][1][11][7] = 20, - [0][1][2][8] = 68, - [0][1][1][8] = 20, - [0][1][3][8] = 28, - [0][1][5][8] = 68, - [0][1][6][8] = 20, - [0][1][9][8] = 20, - [0][1][8][8] = 48, - [0][1][11][8] = 20, - [0][1][2][9] = 64, - [0][1][1][9] = 20, - [0][1][3][9] = 28, - [0][1][5][9] = 64, - [0][1][6][9] = 20, - [0][1][9][9] = 20, - [0][1][8][9] = 48, - [0][1][11][9] = 20, - [0][1][2][10] = 64, - [0][1][1][10] = 20, - [0][1][3][10] = 28, - [0][1][5][10] = 64, - [0][1][6][10] = 20, - [0][1][9][10] = 20, - [0][1][8][10] = 48, - [0][1][11][10] = 20, - [0][1][2][11] = 54, - [0][1][1][11] = 20, - [0][1][3][11] = 28, - [0][1][5][11] = 54, - [0][1][6][11] = 20, - [0][1][9][11] = 20, - [0][1][8][11] = 48, - [0][1][11][11] = 20, - [0][1][2][12] = 32, - [0][1][1][12] = 20, - [0][1][3][12] = 28, - [0][1][5][12] = 32, - [0][1][6][12] = 20, - [0][1][9][12] = 20, - [0][1][8][12] = 48, - [0][1][11][12] = 20, - [0][1][2][13] = 127, - [0][1][1][13] = 127, - [0][1][3][13] = 127, - [0][1][5][13] = 127, - [0][1][6][13] = 127, - [0][1][9][13] = 127, - [0][1][8][13] = 127, - [0][1][11][13] = 127, - [1][0][2][0] = 72, - [1][0][1][0] = 42, - [1][0][3][0] = 50, - [1][0][5][0] = 72, - [1][0][6][0] = 42, - [1][0][9][0] = 42, - [1][0][8][0] = 60, - [1][0][11][0] = 42, - [1][0][2][1] = 72, - [1][0][1][1] = 42, - [1][0][3][1] = 50, - [1][0][5][1] = 72, - [1][0][6][1] = 42, - [1][0][9][1] = 42, - [1][0][8][1] = 60, - [1][0][11][1] = 42, - [1][0][2][2] = 76, - [1][0][1][2] = 42, - [1][0][3][2] = 50, - [1][0][5][2] = 76, - [1][0][6][2] = 42, - [1][0][9][2] = 42, - [1][0][8][2] = 60, - [1][0][11][2] = 42, - [1][0][2][3] = 78, - [1][0][1][3] = 42, - [1][0][3][3] = 50, - [1][0][5][3] = 78, - [1][0][6][3] = 42, - [1][0][9][3] = 42, - [1][0][8][3] = 60, - [1][0][11][3] = 42, - [1][0][2][4] = 78, - [1][0][1][4] = 42, - [1][0][3][4] = 50, - [1][0][5][4] = 78, - [1][0][6][4] = 42, - [1][0][9][4] = 42, - [1][0][8][4] = 60, - [1][0][11][4] = 42, - [1][0][2][5] = 78, - [1][0][1][5] = 42, - [1][0][3][5] = 50, - [1][0][5][5] = 78, - [1][0][6][5] = 42, - [1][0][9][5] = 42, - [1][0][8][5] = 60, - [1][0][11][5] = 42, - [1][0][2][6] = 78, - [1][0][1][6] = 42, - [1][0][3][6] = 50, - [1][0][5][6] = 78, - [1][0][6][6] = 42, - [1][0][9][6] = 42, - [1][0][8][6] = 60, - [1][0][11][6] = 42, - [1][0][2][7] = 78, - [1][0][1][7] = 42, - [1][0][3][7] = 50, - [1][0][5][7] = 78, - [1][0][6][7] = 42, - [1][0][9][7] = 42, - [1][0][8][7] = 60, - [1][0][11][7] = 42, - [1][0][2][8] = 78, - [1][0][1][8] = 42, - [1][0][3][8] = 50, - [1][0][5][8] = 78, - [1][0][6][8] = 42, - [1][0][9][8] = 42, - [1][0][8][8] = 60, - [1][0][11][8] = 42, - [1][0][2][9] = 74, - [1][0][1][9] = 42, - [1][0][3][9] = 50, - [1][0][5][9] = 74, - [1][0][6][9] = 42, - [1][0][9][9] = 42, - 
[1][0][8][9] = 60, - [1][0][11][9] = 42, - [1][0][2][10] = 74, - [1][0][1][10] = 42, - [1][0][3][10] = 50, - [1][0][5][10] = 74, - [1][0][6][10] = 42, - [1][0][9][10] = 42, - [1][0][8][10] = 60, - [1][0][11][10] = 42, - [1][0][2][11] = 64, - [1][0][1][11] = 42, - [1][0][3][11] = 50, - [1][0][5][11] = 64, - [1][0][6][11] = 42, - [1][0][9][11] = 42, - [1][0][8][11] = 60, - [1][0][11][11] = 42, - [1][0][2][12] = 36, - [1][0][1][12] = 42, - [1][0][3][12] = 50, - [1][0][5][12] = 36, - [1][0][6][12] = 42, - [1][0][9][12] = 42, - [1][0][8][12] = 60, - [1][0][11][12] = 42, - [1][0][2][13] = 127, - [1][0][1][13] = 127, - [1][0][3][13] = 127, - [1][0][5][13] = 127, - [1][0][6][13] = 127, - [1][0][9][13] = 127, - [1][0][8][13] = 127, - [1][0][11][13] = 127, - [1][1][2][0] = 66, - [1][1][1][0] = 30, - [1][1][3][0] = 38, - [1][1][5][0] = 66, - [1][1][6][0] = 30, - [1][1][9][0] = 30, - [1][1][8][0] = 48, - [1][1][11][0] = 30, - [1][1][2][1] = 66, - [1][1][1][1] = 30, - [1][1][3][1] = 38, - [1][1][5][1] = 66, - [1][1][6][1] = 30, - [1][1][9][1] = 30, - [1][1][8][1] = 48, - [1][1][11][1] = 30, - [1][1][2][2] = 70, - [1][1][1][2] = 30, - [1][1][3][2] = 38, - [1][1][5][2] = 70, - [1][1][6][2] = 30, - [1][1][9][2] = 30, - [1][1][8][2] = 48, - [1][1][11][2] = 30, - [1][1][2][3] = 74, - [1][1][1][3] = 30, - [1][1][3][3] = 38, - [1][1][5][3] = 74, - [1][1][6][3] = 30, - [1][1][9][3] = 30, - [1][1][8][3] = 48, - [1][1][11][3] = 30, - [1][1][2][4] = 78, - [1][1][1][4] = 30, - [1][1][3][4] = 38, - [1][1][5][4] = 78, - [1][1][6][4] = 30, - [1][1][9][4] = 30, - [1][1][8][4] = 48, - [1][1][11][4] = 30, - [1][1][2][5] = 78, - [1][1][1][5] = 30, - [1][1][3][5] = 38, - [1][1][5][5] = 78, - [1][1][6][5] = 30, - [1][1][9][5] = 30, - [1][1][8][5] = 48, - [1][1][11][5] = 30, - [1][1][2][6] = 78, - [1][1][1][6] = 30, - [1][1][3][6] = 38, - [1][1][5][6] = 78, - [1][1][6][6] = 30, - [1][1][9][6] = 30, - [1][1][8][6] = 48, - [1][1][11][6] = 30, - [1][1][2][7] = 74, - [1][1][1][7] = 30, - [1][1][3][7] = 38, - [1][1][5][7] = 74, - [1][1][6][7] = 30, - [1][1][9][7] = 30, - [1][1][8][7] = 48, - [1][1][11][7] = 30, - [1][1][2][8] = 70, - [1][1][1][8] = 30, - [1][1][3][8] = 38, - [1][1][5][8] = 70, - [1][1][6][8] = 30, - [1][1][9][8] = 30, - [1][1][8][8] = 48, - [1][1][11][8] = 30, - [1][1][2][9] = 66, - [1][1][1][9] = 30, - [1][1][3][9] = 38, - [1][1][5][9] = 66, - [1][1][6][9] = 30, - [1][1][9][9] = 30, - [1][1][8][9] = 48, - [1][1][11][9] = 30, - [1][1][2][10] = 66, - [1][1][1][10] = 30, - [1][1][3][10] = 38, - [1][1][5][10] = 66, - [1][1][6][10] = 30, - [1][1][9][10] = 30, - [1][1][8][10] = 48, - [1][1][11][10] = 30, - [1][1][2][11] = 60, - [1][1][1][11] = 30, - [1][1][3][11] = 38, - [1][1][5][11] = 60, - [1][1][6][11] = 30, - [1][1][9][11] = 30, - [1][1][8][11] = 48, - [1][1][11][11] = 30, - [1][1][2][12] = 32, - [1][1][1][12] = 30, - [1][1][3][12] = 38, - [1][1][5][12] = 32, - [1][1][6][12] = 30, - [1][1][9][12] = 30, - [1][1][8][12] = 48, - [1][1][11][12] = 30, - [1][1][2][13] = 127, - [1][1][1][13] = 127, - [1][1][3][13] = 127, - [1][1][5][13] = 127, - [1][1][6][13] = 127, - [1][1][9][13] = 127, - [1][1][8][13] = 127, - [1][1][11][13] = 127, - [2][0][2][0] = 76, - [2][0][1][0] = 52, - [2][0][3][0] = 64, - [2][0][5][0] = 76, - [2][0][6][0] = 52, - [2][0][9][0] = 52, - [2][0][8][0] = 60, - [2][0][11][0] = 52, - [2][0][2][1] = 76, - [2][0][1][1] = 52, - [2][0][3][1] = 64, - [2][0][5][1] = 76, - [2][0][6][1] = 52, - [2][0][9][1] = 52, - [2][0][8][1] = 60, - [2][0][11][1] = 52, - [2][0][2][2] = 78, - [2][0][1][2] = 52, - 
[2][0][3][2] = 64, - [2][0][5][2] = 78, - [2][0][6][2] = 52, - [2][0][9][2] = 52, - [2][0][8][2] = 60, - [2][0][11][2] = 52, - [2][0][2][3] = 78, - [2][0][1][3] = 52, - [2][0][3][3] = 64, - [2][0][5][3] = 78, - [2][0][6][3] = 52, - [2][0][9][3] = 52, - [2][0][8][3] = 60, - [2][0][11][3] = 52, - [2][0][2][4] = 78, - [2][0][1][4] = 52, - [2][0][3][4] = 64, - [2][0][5][4] = 78, - [2][0][6][4] = 52, - [2][0][9][4] = 52, - [2][0][8][4] = 60, - [2][0][11][4] = 52, - [2][0][2][5] = 78, - [2][0][1][5] = 52, - [2][0][3][5] = 64, - [2][0][5][5] = 78, - [2][0][6][5] = 52, - [2][0][9][5] = 52, - [2][0][8][5] = 60, - [2][0][11][5] = 52, - [2][0][2][6] = 78, - [2][0][1][6] = 52, - [2][0][3][6] = 64, - [2][0][5][6] = 78, - [2][0][6][6] = 52, - [2][0][9][6] = 52, - [2][0][8][6] = 60, - [2][0][11][6] = 52, - [2][0][2][7] = 78, - [2][0][1][7] = 52, - [2][0][3][7] = 64, - [2][0][5][7] = 78, - [2][0][6][7] = 52, - [2][0][9][7] = 52, - [2][0][8][7] = 60, - [2][0][11][7] = 52, - [2][0][2][8] = 78, - [2][0][1][8] = 52, - [2][0][3][8] = 64, - [2][0][5][8] = 78, - [2][0][6][8] = 52, - [2][0][9][8] = 52, - [2][0][8][8] = 60, - [2][0][11][8] = 52, - [2][0][2][9] = 76, - [2][0][1][9] = 52, - [2][0][3][9] = 64, - [2][0][5][9] = 76, - [2][0][6][9] = 52, - [2][0][9][9] = 52, - [2][0][8][9] = 60, - [2][0][11][9] = 52, - [2][0][2][10] = 76, - [2][0][1][10] = 52, - [2][0][3][10] = 64, - [2][0][5][10] = 76, - [2][0][6][10] = 52, - [2][0][9][10] = 52, - [2][0][8][10] = 60, - [2][0][11][10] = 52, - [2][0][2][11] = 68, - [2][0][1][11] = 52, - [2][0][3][11] = 64, - [2][0][5][11] = 68, - [2][0][6][11] = 52, - [2][0][9][11] = 52, - [2][0][8][11] = 60, - [2][0][11][11] = 52, - [2][0][2][12] = 40, - [2][0][1][12] = 52, - [2][0][3][12] = 64, - [2][0][5][12] = 40, - [2][0][6][12] = 52, - [2][0][9][12] = 52, - [2][0][8][12] = 60, - [2][0][11][12] = 52, - [2][0][2][13] = 127, - [2][0][1][13] = 127, - [2][0][3][13] = 127, - [2][0][5][13] = 127, - [2][0][6][13] = 127, - [2][0][9][13] = 127, - [2][0][8][13] = 127, - [2][0][11][13] = 127, - [2][1][2][0] = 68, - [2][1][1][0] = 40, - [2][1][3][0] = 52, - [2][1][5][0] = 68, - [2][1][6][0] = 40, - [2][1][9][0] = 40, - [2][1][8][0] = 48, - [2][1][11][0] = 40, - [2][1][2][1] = 68, - [2][1][1][1] = 40, - [2][1][3][1] = 52, - [2][1][5][1] = 68, - [2][1][6][1] = 40, - [2][1][9][1] = 40, - [2][1][8][1] = 48, - [2][1][11][1] = 40, - [2][1][2][2] = 72, - [2][1][1][2] = 40, - [2][1][3][2] = 52, - [2][1][5][2] = 72, - [2][1][6][2] = 40, - [2][1][9][2] = 40, - [2][1][8][2] = 48, - [2][1][11][2] = 40, - [2][1][2][3] = 76, - [2][1][1][3] = 40, - [2][1][3][3] = 52, - [2][1][5][3] = 76, - [2][1][6][3] = 40, - [2][1][9][3] = 40, - [2][1][8][3] = 48, - [2][1][11][3] = 40, - [2][1][2][4] = 78, - [2][1][1][4] = 40, - [2][1][3][4] = 52, - [2][1][5][4] = 78, - [2][1][6][4] = 40, - [2][1][9][4] = 40, - [2][1][8][4] = 48, - [2][1][11][4] = 40, - [2][1][2][5] = 78, - [2][1][1][5] = 40, - [2][1][3][5] = 52, - [2][1][5][5] = 78, - [2][1][6][5] = 40, - [2][1][9][5] = 40, - [2][1][8][5] = 48, - [2][1][11][5] = 40, - [2][1][2][6] = 78, - [2][1][1][6] = 40, - [2][1][3][6] = 52, - [2][1][5][6] = 78, - [2][1][6][6] = 40, - [2][1][9][6] = 40, - [2][1][8][6] = 48, - [2][1][11][6] = 40, - [2][1][2][7] = 78, - [2][1][1][7] = 40, - [2][1][3][7] = 52, - [2][1][5][7] = 78, - [2][1][6][7] = 40, - [2][1][9][7] = 40, - [2][1][8][7] = 48, - [2][1][11][7] = 40, - [2][1][2][8] = 74, - [2][1][1][8] = 40, - [2][1][3][8] = 52, - [2][1][5][8] = 74, - [2][1][6][8] = 40, - [2][1][9][8] = 40, - [2][1][8][8] = 48, - [2][1][11][8] = 40, - 
[2][1][2][9] = 70, - [2][1][1][9] = 40, - [2][1][3][9] = 52, - [2][1][5][9] = 70, - [2][1][6][9] = 40, - [2][1][9][9] = 40, - [2][1][8][9] = 48, - [2][1][11][9] = 40, - [2][1][2][10] = 70, - [2][1][1][10] = 40, - [2][1][3][10] = 52, - [2][1][5][10] = 70, - [2][1][6][10] = 40, - [2][1][9][10] = 40, - [2][1][8][10] = 48, - [2][1][11][10] = 40, - [2][1][2][11] = 48, - [2][1][1][11] = 40, - [2][1][3][11] = 52, - [2][1][5][11] = 48, - [2][1][6][11] = 40, - [2][1][9][11] = 40, - [2][1][8][11] = 48, - [2][1][11][11] = 40, - [2][1][2][12] = 26, - [2][1][1][12] = 40, - [2][1][3][12] = 52, - [2][1][5][12] = 26, - [2][1][6][12] = 40, - [2][1][9][12] = 40, - [2][1][8][12] = 48, - [2][1][11][12] = 40, - [2][1][2][13] = 127, - [2][1][1][13] = 127, - [2][1][3][13] = 127, - [2][1][5][13] = 127, - [2][1][6][13] = 127, - [2][1][9][13] = 127, - [2][1][8][13] = 127, - [2][1][11][13] = 127, + [0][0][RTW89_WW][0] = 32, + [0][0][RTW89_WW][1] = 32, + [0][0][RTW89_WW][2] = 32, + [0][0][RTW89_WW][3] = 32, + [0][0][RTW89_WW][4] = 32, + [0][0][RTW89_WW][5] = 32, + [0][0][RTW89_WW][6] = 32, + [0][0][RTW89_WW][7] = 32, + [0][0][RTW89_WW][8] = 32, + [0][0][RTW89_WW][9] = 32, + [0][0][RTW89_WW][10] = 32, + [0][0][RTW89_WW][11] = 32, + [0][0][RTW89_WW][12] = 32, + [0][0][RTW89_WW][13] = 0, + [0][1][RTW89_WW][0] = 20, + [0][1][RTW89_WW][1] = 20, + [0][1][RTW89_WW][2] = 20, + [0][1][RTW89_WW][3] = 20, + [0][1][RTW89_WW][4] = 20, + [0][1][RTW89_WW][5] = 20, + [0][1][RTW89_WW][6] = 20, + [0][1][RTW89_WW][7] = 20, + [0][1][RTW89_WW][8] = 20, + [0][1][RTW89_WW][9] = 20, + [0][1][RTW89_WW][10] = 20, + [0][1][RTW89_WW][11] = 20, + [0][1][RTW89_WW][12] = 20, + [0][1][RTW89_WW][13] = 0, + [1][0][RTW89_WW][0] = 42, + [1][0][RTW89_WW][1] = 42, + [1][0][RTW89_WW][2] = 42, + [1][0][RTW89_WW][3] = 42, + [1][0][RTW89_WW][4] = 42, + [1][0][RTW89_WW][5] = 42, + [1][0][RTW89_WW][6] = 42, + [1][0][RTW89_WW][7] = 42, + [1][0][RTW89_WW][8] = 42, + [1][0][RTW89_WW][9] = 42, + [1][0][RTW89_WW][10] = 42, + [1][0][RTW89_WW][11] = 42, + [1][0][RTW89_WW][12] = 36, + [1][0][RTW89_WW][13] = 0, + [1][1][RTW89_WW][0] = 30, + [1][1][RTW89_WW][1] = 30, + [1][1][RTW89_WW][2] = 30, + [1][1][RTW89_WW][3] = 30, + [1][1][RTW89_WW][4] = 30, + [1][1][RTW89_WW][5] = 30, + [1][1][RTW89_WW][6] = 30, + [1][1][RTW89_WW][7] = 30, + [1][1][RTW89_WW][8] = 30, + [1][1][RTW89_WW][9] = 30, + [1][1][RTW89_WW][10] = 30, + [1][1][RTW89_WW][11] = 30, + [1][1][RTW89_WW][12] = 30, + [1][1][RTW89_WW][13] = 0, + [2][0][RTW89_WW][0] = 52, + [2][0][RTW89_WW][1] = 52, + [2][0][RTW89_WW][2] = 52, + [2][0][RTW89_WW][3] = 52, + [2][0][RTW89_WW][4] = 52, + [2][0][RTW89_WW][5] = 52, + [2][0][RTW89_WW][6] = 52, + [2][0][RTW89_WW][7] = 52, + [2][0][RTW89_WW][8] = 52, + [2][0][RTW89_WW][9] = 52, + [2][0][RTW89_WW][10] = 52, + [2][0][RTW89_WW][11] = 52, + [2][0][RTW89_WW][12] = 40, + [2][0][RTW89_WW][13] = 0, + [2][1][RTW89_WW][0] = 40, + [2][1][RTW89_WW][1] = 40, + [2][1][RTW89_WW][2] = 40, + [2][1][RTW89_WW][3] = 40, + [2][1][RTW89_WW][4] = 40, + [2][1][RTW89_WW][5] = 40, + [2][1][RTW89_WW][6] = 40, + [2][1][RTW89_WW][7] = 40, + [2][1][RTW89_WW][8] = 40, + [2][1][RTW89_WW][9] = 40, + [2][1][RTW89_WW][10] = 40, + [2][1][RTW89_WW][11] = 40, + [2][1][RTW89_WW][12] = 26, + [2][1][RTW89_WW][13] = 0, + [0][0][RTW89_FCC][0] = 70, + [0][0][RTW89_ETSI][0] = 32, + [0][0][RTW89_MKK][0] = 40, + [0][0][RTW89_IC][0] = 70, + [0][0][RTW89_KCC][0] = 46, + [0][0][RTW89_ACMA][0] = 32, + [0][0][RTW89_CHILE][0] = 60, + [0][0][RTW89_UKRAINE][0] = 32, + [0][0][RTW89_MEXICO][0] = 70, + [0][0][RTW89_CN][0] = 32, + 
[0][0][RTW89_QATAR][0] = 32, + [0][0][RTW89_FCC][1] = 70, + [0][0][RTW89_ETSI][1] = 32, + [0][0][RTW89_MKK][1] = 40, + [0][0][RTW89_IC][1] = 70, + [0][0][RTW89_KCC][1] = 46, + [0][0][RTW89_ACMA][1] = 32, + [0][0][RTW89_CHILE][1] = 60, + [0][0][RTW89_UKRAINE][1] = 32, + [0][0][RTW89_MEXICO][1] = 70, + [0][0][RTW89_CN][1] = 32, + [0][0][RTW89_QATAR][1] = 32, + [0][0][RTW89_FCC][2] = 74, + [0][0][RTW89_ETSI][2] = 32, + [0][0][RTW89_MKK][2] = 40, + [0][0][RTW89_IC][2] = 74, + [0][0][RTW89_KCC][2] = 46, + [0][0][RTW89_ACMA][2] = 32, + [0][0][RTW89_CHILE][2] = 60, + [0][0][RTW89_UKRAINE][2] = 32, + [0][0][RTW89_MEXICO][2] = 74, + [0][0][RTW89_CN][2] = 32, + [0][0][RTW89_QATAR][2] = 32, + [0][0][RTW89_FCC][3] = 78, + [0][0][RTW89_ETSI][3] = 32, + [0][0][RTW89_MKK][3] = 40, + [0][0][RTW89_IC][3] = 78, + [0][0][RTW89_KCC][3] = 46, + [0][0][RTW89_ACMA][3] = 32, + [0][0][RTW89_CHILE][3] = 60, + [0][0][RTW89_UKRAINE][3] = 32, + [0][0][RTW89_MEXICO][3] = 78, + [0][0][RTW89_CN][3] = 32, + [0][0][RTW89_QATAR][3] = 32, + [0][0][RTW89_FCC][4] = 78, + [0][0][RTW89_ETSI][4] = 32, + [0][0][RTW89_MKK][4] = 40, + [0][0][RTW89_IC][4] = 78, + [0][0][RTW89_KCC][4] = 46, + [0][0][RTW89_ACMA][4] = 32, + [0][0][RTW89_CHILE][4] = 60, + [0][0][RTW89_UKRAINE][4] = 32, + [0][0][RTW89_MEXICO][4] = 78, + [0][0][RTW89_CN][4] = 32, + [0][0][RTW89_QATAR][4] = 32, + [0][0][RTW89_FCC][5] = 78, + [0][0][RTW89_ETSI][5] = 32, + [0][0][RTW89_MKK][5] = 40, + [0][0][RTW89_IC][5] = 78, + [0][0][RTW89_KCC][5] = 46, + [0][0][RTW89_ACMA][5] = 32, + [0][0][RTW89_CHILE][5] = 60, + [0][0][RTW89_UKRAINE][5] = 32, + [0][0][RTW89_MEXICO][5] = 78, + [0][0][RTW89_CN][5] = 32, + [0][0][RTW89_QATAR][5] = 32, + [0][0][RTW89_FCC][6] = 78, + [0][0][RTW89_ETSI][6] = 32, + [0][0][RTW89_MKK][6] = 40, + [0][0][RTW89_IC][6] = 78, + [0][0][RTW89_KCC][6] = 46, + [0][0][RTW89_ACMA][6] = 32, + [0][0][RTW89_CHILE][6] = 60, + [0][0][RTW89_UKRAINE][6] = 32, + [0][0][RTW89_MEXICO][6] = 78, + [0][0][RTW89_CN][6] = 32, + [0][0][RTW89_QATAR][6] = 32, + [0][0][RTW89_FCC][7] = 78, + [0][0][RTW89_ETSI][7] = 32, + [0][0][RTW89_MKK][7] = 40, + [0][0][RTW89_IC][7] = 78, + [0][0][RTW89_KCC][7] = 46, + [0][0][RTW89_ACMA][7] = 32, + [0][0][RTW89_CHILE][7] = 60, + [0][0][RTW89_UKRAINE][7] = 32, + [0][0][RTW89_MEXICO][7] = 78, + [0][0][RTW89_CN][7] = 32, + [0][0][RTW89_QATAR][7] = 32, + [0][0][RTW89_FCC][8] = 74, + [0][0][RTW89_ETSI][8] = 32, + [0][0][RTW89_MKK][8] = 40, + [0][0][RTW89_IC][8] = 74, + [0][0][RTW89_KCC][8] = 46, + [0][0][RTW89_ACMA][8] = 32, + [0][0][RTW89_CHILE][8] = 60, + [0][0][RTW89_UKRAINE][8] = 32, + [0][0][RTW89_MEXICO][8] = 74, + [0][0][RTW89_CN][8] = 32, + [0][0][RTW89_QATAR][8] = 32, + [0][0][RTW89_FCC][9] = 70, + [0][0][RTW89_ETSI][9] = 32, + [0][0][RTW89_MKK][9] = 40, + [0][0][RTW89_IC][9] = 70, + [0][0][RTW89_KCC][9] = 46, + [0][0][RTW89_ACMA][9] = 32, + [0][0][RTW89_CHILE][9] = 60, + [0][0][RTW89_UKRAINE][9] = 32, + [0][0][RTW89_MEXICO][9] = 70, + [0][0][RTW89_CN][9] = 32, + [0][0][RTW89_QATAR][9] = 32, + [0][0][RTW89_FCC][10] = 70, + [0][0][RTW89_ETSI][10] = 32, + [0][0][RTW89_MKK][10] = 40, + [0][0][RTW89_IC][10] = 70, + [0][0][RTW89_KCC][10] = 46, + [0][0][RTW89_ACMA][10] = 32, + [0][0][RTW89_CHILE][10] = 60, + [0][0][RTW89_UKRAINE][10] = 32, + [0][0][RTW89_MEXICO][10] = 70, + [0][0][RTW89_CN][10] = 32, + [0][0][RTW89_QATAR][10] = 32, + [0][0][RTW89_FCC][11] = 58, + [0][0][RTW89_ETSI][11] = 32, + [0][0][RTW89_MKK][11] = 40, + [0][0][RTW89_IC][11] = 58, + [0][0][RTW89_KCC][11] = 46, + [0][0][RTW89_ACMA][11] = 32, + [0][0][RTW89_CHILE][11] = 58, + 
[0][0][RTW89_UKRAINE][11] = 32, + [0][0][RTW89_MEXICO][11] = 58, + [0][0][RTW89_CN][11] = 32, + [0][0][RTW89_QATAR][11] = 32, + [0][0][RTW89_FCC][12] = 34, + [0][0][RTW89_ETSI][12] = 32, + [0][0][RTW89_MKK][12] = 40, + [0][0][RTW89_IC][12] = 34, + [0][0][RTW89_KCC][12] = 46, + [0][0][RTW89_ACMA][12] = 32, + [0][0][RTW89_CHILE][12] = 34, + [0][0][RTW89_UKRAINE][12] = 32, + [0][0][RTW89_MEXICO][12] = 34, + [0][0][RTW89_CN][12] = 32, + [0][0][RTW89_QATAR][12] = 32, + [0][0][RTW89_FCC][13] = 127, + [0][0][RTW89_ETSI][13] = 127, + [0][0][RTW89_MKK][13] = 127, + [0][0][RTW89_IC][13] = 127, + [0][0][RTW89_KCC][13] = 127, + [0][0][RTW89_ACMA][13] = 127, + [0][0][RTW89_CHILE][13] = 127, + [0][0][RTW89_UKRAINE][13] = 127, + [0][0][RTW89_MEXICO][13] = 127, + [0][0][RTW89_CN][13] = 127, + [0][0][RTW89_QATAR][13] = 127, + [0][1][RTW89_FCC][0] = 64, + [0][1][RTW89_ETSI][0] = 20, + [0][1][RTW89_MKK][0] = 28, + [0][1][RTW89_IC][0] = 64, + [0][1][RTW89_KCC][0] = 32, + [0][1][RTW89_ACMA][0] = 20, + [0][1][RTW89_CHILE][0] = 48, + [0][1][RTW89_UKRAINE][0] = 20, + [0][1][RTW89_MEXICO][0] = 64, + [0][1][RTW89_CN][0] = 20, + [0][1][RTW89_QATAR][0] = 20, + [0][1][RTW89_FCC][1] = 64, + [0][1][RTW89_ETSI][1] = 20, + [0][1][RTW89_MKK][1] = 28, + [0][1][RTW89_IC][1] = 64, + [0][1][RTW89_KCC][1] = 32, + [0][1][RTW89_ACMA][1] = 20, + [0][1][RTW89_CHILE][1] = 48, + [0][1][RTW89_UKRAINE][1] = 20, + [0][1][RTW89_MEXICO][1] = 64, + [0][1][RTW89_CN][1] = 20, + [0][1][RTW89_QATAR][1] = 20, + [0][1][RTW89_FCC][2] = 68, + [0][1][RTW89_ETSI][2] = 20, + [0][1][RTW89_MKK][2] = 28, + [0][1][RTW89_IC][2] = 68, + [0][1][RTW89_KCC][2] = 32, + [0][1][RTW89_ACMA][2] = 20, + [0][1][RTW89_CHILE][2] = 48, + [0][1][RTW89_UKRAINE][2] = 20, + [0][1][RTW89_MEXICO][2] = 68, + [0][1][RTW89_CN][2] = 20, + [0][1][RTW89_QATAR][2] = 20, + [0][1][RTW89_FCC][3] = 72, + [0][1][RTW89_ETSI][3] = 20, + [0][1][RTW89_MKK][3] = 28, + [0][1][RTW89_IC][3] = 72, + [0][1][RTW89_KCC][3] = 32, + [0][1][RTW89_ACMA][3] = 20, + [0][1][RTW89_CHILE][3] = 48, + [0][1][RTW89_UKRAINE][3] = 20, + [0][1][RTW89_MEXICO][3] = 72, + [0][1][RTW89_CN][3] = 20, + [0][1][RTW89_QATAR][3] = 20, + [0][1][RTW89_FCC][4] = 76, + [0][1][RTW89_ETSI][4] = 20, + [0][1][RTW89_MKK][4] = 28, + [0][1][RTW89_IC][4] = 76, + [0][1][RTW89_KCC][4] = 32, + [0][1][RTW89_ACMA][4] = 20, + [0][1][RTW89_CHILE][4] = 48, + [0][1][RTW89_UKRAINE][4] = 20, + [0][1][RTW89_MEXICO][4] = 76, + [0][1][RTW89_CN][4] = 20, + [0][1][RTW89_QATAR][4] = 20, + [0][1][RTW89_FCC][5] = 78, + [0][1][RTW89_ETSI][5] = 20, + [0][1][RTW89_MKK][5] = 28, + [0][1][RTW89_IC][5] = 78, + [0][1][RTW89_KCC][5] = 32, + [0][1][RTW89_ACMA][5] = 20, + [0][1][RTW89_CHILE][5] = 48, + [0][1][RTW89_UKRAINE][5] = 20, + [0][1][RTW89_MEXICO][5] = 78, + [0][1][RTW89_CN][5] = 20, + [0][1][RTW89_QATAR][5] = 20, + [0][1][RTW89_FCC][6] = 76, + [0][1][RTW89_ETSI][6] = 20, + [0][1][RTW89_MKK][6] = 28, + [0][1][RTW89_IC][6] = 76, + [0][1][RTW89_KCC][6] = 32, + [0][1][RTW89_ACMA][6] = 20, + [0][1][RTW89_CHILE][6] = 48, + [0][1][RTW89_UKRAINE][6] = 20, + [0][1][RTW89_MEXICO][6] = 76, + [0][1][RTW89_CN][6] = 20, + [0][1][RTW89_QATAR][6] = 20, + [0][1][RTW89_FCC][7] = 72, + [0][1][RTW89_ETSI][7] = 20, + [0][1][RTW89_MKK][7] = 28, + [0][1][RTW89_IC][7] = 72, + [0][1][RTW89_KCC][7] = 32, + [0][1][RTW89_ACMA][7] = 20, + [0][1][RTW89_CHILE][7] = 48, + [0][1][RTW89_UKRAINE][7] = 20, + [0][1][RTW89_MEXICO][7] = 72, + [0][1][RTW89_CN][7] = 20, + [0][1][RTW89_QATAR][7] = 20, + [0][1][RTW89_FCC][8] = 68, + [0][1][RTW89_ETSI][8] = 20, + [0][1][RTW89_MKK][8] = 28, + 
[0][1][RTW89_IC][8] = 68, + [0][1][RTW89_KCC][8] = 32, + [0][1][RTW89_ACMA][8] = 20, + [0][1][RTW89_CHILE][8] = 48, + [0][1][RTW89_UKRAINE][8] = 20, + [0][1][RTW89_MEXICO][8] = 68, + [0][1][RTW89_CN][8] = 20, + [0][1][RTW89_QATAR][8] = 20, + [0][1][RTW89_FCC][9] = 64, + [0][1][RTW89_ETSI][9] = 20, + [0][1][RTW89_MKK][9] = 28, + [0][1][RTW89_IC][9] = 64, + [0][1][RTW89_KCC][9] = 32, + [0][1][RTW89_ACMA][9] = 20, + [0][1][RTW89_CHILE][9] = 48, + [0][1][RTW89_UKRAINE][9] = 20, + [0][1][RTW89_MEXICO][9] = 64, + [0][1][RTW89_CN][9] = 20, + [0][1][RTW89_QATAR][9] = 20, + [0][1][RTW89_FCC][10] = 64, + [0][1][RTW89_ETSI][10] = 20, + [0][1][RTW89_MKK][10] = 28, + [0][1][RTW89_IC][10] = 64, + [0][1][RTW89_KCC][10] = 32, + [0][1][RTW89_ACMA][10] = 20, + [0][1][RTW89_CHILE][10] = 48, + [0][1][RTW89_UKRAINE][10] = 20, + [0][1][RTW89_MEXICO][10] = 64, + [0][1][RTW89_CN][10] = 20, + [0][1][RTW89_QATAR][10] = 20, + [0][1][RTW89_FCC][11] = 54, + [0][1][RTW89_ETSI][11] = 20, + [0][1][RTW89_MKK][11] = 28, + [0][1][RTW89_IC][11] = 54, + [0][1][RTW89_KCC][11] = 32, + [0][1][RTW89_ACMA][11] = 20, + [0][1][RTW89_CHILE][11] = 48, + [0][1][RTW89_UKRAINE][11] = 20, + [0][1][RTW89_MEXICO][11] = 54, + [0][1][RTW89_CN][11] = 20, + [0][1][RTW89_QATAR][11] = 20, + [0][1][RTW89_FCC][12] = 32, + [0][1][RTW89_ETSI][12] = 20, + [0][1][RTW89_MKK][12] = 28, + [0][1][RTW89_IC][12] = 32, + [0][1][RTW89_KCC][12] = 32, + [0][1][RTW89_ACMA][12] = 20, + [0][1][RTW89_CHILE][12] = 32, + [0][1][RTW89_UKRAINE][12] = 20, + [0][1][RTW89_MEXICO][12] = 32, + [0][1][RTW89_CN][12] = 20, + [0][1][RTW89_QATAR][12] = 20, + [0][1][RTW89_FCC][13] = 127, + [0][1][RTW89_ETSI][13] = 127, + [0][1][RTW89_MKK][13] = 127, + [0][1][RTW89_IC][13] = 127, + [0][1][RTW89_KCC][13] = 127, + [0][1][RTW89_ACMA][13] = 127, + [0][1][RTW89_CHILE][13] = 127, + [0][1][RTW89_UKRAINE][13] = 127, + [0][1][RTW89_MEXICO][13] = 127, + [0][1][RTW89_CN][13] = 127, + [0][1][RTW89_QATAR][13] = 127, + [1][0][RTW89_FCC][0] = 72, + [1][0][RTW89_ETSI][0] = 42, + [1][0][RTW89_MKK][0] = 50, + [1][0][RTW89_IC][0] = 72, + [1][0][RTW89_KCC][0] = 58, + [1][0][RTW89_ACMA][0] = 42, + [1][0][RTW89_CHILE][0] = 60, + [1][0][RTW89_UKRAINE][0] = 42, + [1][0][RTW89_MEXICO][0] = 72, + [1][0][RTW89_CN][0] = 42, + [1][0][RTW89_QATAR][0] = 42, + [1][0][RTW89_FCC][1] = 72, + [1][0][RTW89_ETSI][1] = 42, + [1][0][RTW89_MKK][1] = 50, + [1][0][RTW89_IC][1] = 72, + [1][0][RTW89_KCC][1] = 58, + [1][0][RTW89_ACMA][1] = 42, + [1][0][RTW89_CHILE][1] = 60, + [1][0][RTW89_UKRAINE][1] = 42, + [1][0][RTW89_MEXICO][1] = 72, + [1][0][RTW89_CN][1] = 42, + [1][0][RTW89_QATAR][1] = 42, + [1][0][RTW89_FCC][2] = 76, + [1][0][RTW89_ETSI][2] = 42, + [1][0][RTW89_MKK][2] = 50, + [1][0][RTW89_IC][2] = 76, + [1][0][RTW89_KCC][2] = 58, + [1][0][RTW89_ACMA][2] = 42, + [1][0][RTW89_CHILE][2] = 60, + [1][0][RTW89_UKRAINE][2] = 42, + [1][0][RTW89_MEXICO][2] = 76, + [1][0][RTW89_CN][2] = 42, + [1][0][RTW89_QATAR][2] = 42, + [1][0][RTW89_FCC][3] = 78, + [1][0][RTW89_ETSI][3] = 42, + [1][0][RTW89_MKK][3] = 50, + [1][0][RTW89_IC][3] = 78, + [1][0][RTW89_KCC][3] = 58, + [1][0][RTW89_ACMA][3] = 42, + [1][0][RTW89_CHILE][3] = 60, + [1][0][RTW89_UKRAINE][3] = 42, + [1][0][RTW89_MEXICO][3] = 78, + [1][0][RTW89_CN][3] = 42, + [1][0][RTW89_QATAR][3] = 42, + [1][0][RTW89_FCC][4] = 78, + [1][0][RTW89_ETSI][4] = 42, + [1][0][RTW89_MKK][4] = 50, + [1][0][RTW89_IC][4] = 78, + [1][0][RTW89_KCC][4] = 58, + [1][0][RTW89_ACMA][4] = 42, + [1][0][RTW89_CHILE][4] = 60, + [1][0][RTW89_UKRAINE][4] = 42, + [1][0][RTW89_MEXICO][4] = 78, + 
[1][0][RTW89_CN][4] = 42, + [1][0][RTW89_QATAR][4] = 42, + [1][0][RTW89_FCC][5] = 78, + [1][0][RTW89_ETSI][5] = 42, + [1][0][RTW89_MKK][5] = 50, + [1][0][RTW89_IC][5] = 78, + [1][0][RTW89_KCC][5] = 58, + [1][0][RTW89_ACMA][5] = 42, + [1][0][RTW89_CHILE][5] = 60, + [1][0][RTW89_UKRAINE][5] = 42, + [1][0][RTW89_MEXICO][5] = 78, + [1][0][RTW89_CN][5] = 42, + [1][0][RTW89_QATAR][5] = 42, + [1][0][RTW89_FCC][6] = 78, + [1][0][RTW89_ETSI][6] = 42, + [1][0][RTW89_MKK][6] = 50, + [1][0][RTW89_IC][6] = 78, + [1][0][RTW89_KCC][6] = 58, + [1][0][RTW89_ACMA][6] = 42, + [1][0][RTW89_CHILE][6] = 60, + [1][0][RTW89_UKRAINE][6] = 42, + [1][0][RTW89_MEXICO][6] = 78, + [1][0][RTW89_CN][6] = 42, + [1][0][RTW89_QATAR][6] = 42, + [1][0][RTW89_FCC][7] = 78, + [1][0][RTW89_ETSI][7] = 42, + [1][0][RTW89_MKK][7] = 50, + [1][0][RTW89_IC][7] = 78, + [1][0][RTW89_KCC][7] = 58, + [1][0][RTW89_ACMA][7] = 42, + [1][0][RTW89_CHILE][7] = 60, + [1][0][RTW89_UKRAINE][7] = 42, + [1][0][RTW89_MEXICO][7] = 78, + [1][0][RTW89_CN][7] = 42, + [1][0][RTW89_QATAR][7] = 42, + [1][0][RTW89_FCC][8] = 78, + [1][0][RTW89_ETSI][8] = 42, + [1][0][RTW89_MKK][8] = 50, + [1][0][RTW89_IC][8] = 78, + [1][0][RTW89_KCC][8] = 58, + [1][0][RTW89_ACMA][8] = 42, + [1][0][RTW89_CHILE][8] = 60, + [1][0][RTW89_UKRAINE][8] = 42, + [1][0][RTW89_MEXICO][8] = 78, + [1][0][RTW89_CN][8] = 42, + [1][0][RTW89_QATAR][8] = 42, + [1][0][RTW89_FCC][9] = 74, + [1][0][RTW89_ETSI][9] = 42, + [1][0][RTW89_MKK][9] = 50, + [1][0][RTW89_IC][9] = 74, + [1][0][RTW89_KCC][9] = 58, + [1][0][RTW89_ACMA][9] = 42, + [1][0][RTW89_CHILE][9] = 60, + [1][0][RTW89_UKRAINE][9] = 42, + [1][0][RTW89_MEXICO][9] = 74, + [1][0][RTW89_CN][9] = 42, + [1][0][RTW89_QATAR][9] = 42, + [1][0][RTW89_FCC][10] = 74, + [1][0][RTW89_ETSI][10] = 42, + [1][0][RTW89_MKK][10] = 50, + [1][0][RTW89_IC][10] = 74, + [1][0][RTW89_KCC][10] = 58, + [1][0][RTW89_ACMA][10] = 42, + [1][0][RTW89_CHILE][10] = 60, + [1][0][RTW89_UKRAINE][10] = 42, + [1][0][RTW89_MEXICO][10] = 74, + [1][0][RTW89_CN][10] = 42, + [1][0][RTW89_QATAR][10] = 42, + [1][0][RTW89_FCC][11] = 64, + [1][0][RTW89_ETSI][11] = 42, + [1][0][RTW89_MKK][11] = 50, + [1][0][RTW89_IC][11] = 64, + [1][0][RTW89_KCC][11] = 58, + [1][0][RTW89_ACMA][11] = 42, + [1][0][RTW89_CHILE][11] = 60, + [1][0][RTW89_UKRAINE][11] = 42, + [1][0][RTW89_MEXICO][11] = 64, + [1][0][RTW89_CN][11] = 42, + [1][0][RTW89_QATAR][11] = 42, + [1][0][RTW89_FCC][12] = 36, + [1][0][RTW89_ETSI][12] = 42, + [1][0][RTW89_MKK][12] = 50, + [1][0][RTW89_IC][12] = 36, + [1][0][RTW89_KCC][12] = 58, + [1][0][RTW89_ACMA][12] = 42, + [1][0][RTW89_CHILE][12] = 36, + [1][0][RTW89_UKRAINE][12] = 42, + [1][0][RTW89_MEXICO][12] = 36, + [1][0][RTW89_CN][12] = 42, + [1][0][RTW89_QATAR][12] = 42, + [1][0][RTW89_FCC][13] = 127, + [1][0][RTW89_ETSI][13] = 127, + [1][0][RTW89_MKK][13] = 127, + [1][0][RTW89_IC][13] = 127, + [1][0][RTW89_KCC][13] = 127, + [1][0][RTW89_ACMA][13] = 127, + [1][0][RTW89_CHILE][13] = 127, + [1][0][RTW89_UKRAINE][13] = 127, + [1][0][RTW89_MEXICO][13] = 127, + [1][0][RTW89_CN][13] = 127, + [1][0][RTW89_QATAR][13] = 127, + [1][1][RTW89_FCC][0] = 66, + [1][1][RTW89_ETSI][0] = 30, + [1][1][RTW89_MKK][0] = 38, + [1][1][RTW89_IC][0] = 66, + [1][1][RTW89_KCC][0] = 44, + [1][1][RTW89_ACMA][0] = 30, + [1][1][RTW89_CHILE][0] = 48, + [1][1][RTW89_UKRAINE][0] = 30, + [1][1][RTW89_MEXICO][0] = 66, + [1][1][RTW89_CN][0] = 30, + [1][1][RTW89_QATAR][0] = 30, + [1][1][RTW89_FCC][1] = 66, + [1][1][RTW89_ETSI][1] = 30, + [1][1][RTW89_MKK][1] = 38, + [1][1][RTW89_IC][1] = 66, + [1][1][RTW89_KCC][1] = 
44, + [1][1][RTW89_ACMA][1] = 30, + [1][1][RTW89_CHILE][1] = 48, + [1][1][RTW89_UKRAINE][1] = 30, + [1][1][RTW89_MEXICO][1] = 66, + [1][1][RTW89_CN][1] = 30, + [1][1][RTW89_QATAR][1] = 30, + [1][1][RTW89_FCC][2] = 70, + [1][1][RTW89_ETSI][2] = 30, + [1][1][RTW89_MKK][2] = 38, + [1][1][RTW89_IC][2] = 70, + [1][1][RTW89_KCC][2] = 44, + [1][1][RTW89_ACMA][2] = 30, + [1][1][RTW89_CHILE][2] = 48, + [1][1][RTW89_UKRAINE][2] = 30, + [1][1][RTW89_MEXICO][2] = 70, + [1][1][RTW89_CN][2] = 30, + [1][1][RTW89_QATAR][2] = 30, + [1][1][RTW89_FCC][3] = 74, + [1][1][RTW89_ETSI][3] = 30, + [1][1][RTW89_MKK][3] = 38, + [1][1][RTW89_IC][3] = 74, + [1][1][RTW89_KCC][3] = 44, + [1][1][RTW89_ACMA][3] = 30, + [1][1][RTW89_CHILE][3] = 48, + [1][1][RTW89_UKRAINE][3] = 30, + [1][1][RTW89_MEXICO][3] = 74, + [1][1][RTW89_CN][3] = 30, + [1][1][RTW89_QATAR][3] = 30, + [1][1][RTW89_FCC][4] = 78, + [1][1][RTW89_ETSI][4] = 30, + [1][1][RTW89_MKK][4] = 38, + [1][1][RTW89_IC][4] = 78, + [1][1][RTW89_KCC][4] = 44, + [1][1][RTW89_ACMA][4] = 30, + [1][1][RTW89_CHILE][4] = 48, + [1][1][RTW89_UKRAINE][4] = 30, + [1][1][RTW89_MEXICO][4] = 78, + [1][1][RTW89_CN][4] = 30, + [1][1][RTW89_QATAR][4] = 30, + [1][1][RTW89_FCC][5] = 78, + [1][1][RTW89_ETSI][5] = 30, + [1][1][RTW89_MKK][5] = 38, + [1][1][RTW89_IC][5] = 78, + [1][1][RTW89_KCC][5] = 44, + [1][1][RTW89_ACMA][5] = 30, + [1][1][RTW89_CHILE][5] = 48, + [1][1][RTW89_UKRAINE][5] = 30, + [1][1][RTW89_MEXICO][5] = 78, + [1][1][RTW89_CN][5] = 30, + [1][1][RTW89_QATAR][5] = 30, + [1][1][RTW89_FCC][6] = 78, + [1][1][RTW89_ETSI][6] = 30, + [1][1][RTW89_MKK][6] = 38, + [1][1][RTW89_IC][6] = 78, + [1][1][RTW89_KCC][6] = 44, + [1][1][RTW89_ACMA][6] = 30, + [1][1][RTW89_CHILE][6] = 48, + [1][1][RTW89_UKRAINE][6] = 30, + [1][1][RTW89_MEXICO][6] = 78, + [1][1][RTW89_CN][6] = 30, + [1][1][RTW89_QATAR][6] = 30, + [1][1][RTW89_FCC][7] = 74, + [1][1][RTW89_ETSI][7] = 30, + [1][1][RTW89_MKK][7] = 38, + [1][1][RTW89_IC][7] = 74, + [1][1][RTW89_KCC][7] = 44, + [1][1][RTW89_ACMA][7] = 30, + [1][1][RTW89_CHILE][7] = 48, + [1][1][RTW89_UKRAINE][7] = 30, + [1][1][RTW89_MEXICO][7] = 74, + [1][1][RTW89_CN][7] = 30, + [1][1][RTW89_QATAR][7] = 30, + [1][1][RTW89_FCC][8] = 70, + [1][1][RTW89_ETSI][8] = 30, + [1][1][RTW89_MKK][8] = 38, + [1][1][RTW89_IC][8] = 70, + [1][1][RTW89_KCC][8] = 44, + [1][1][RTW89_ACMA][8] = 30, + [1][1][RTW89_CHILE][8] = 48, + [1][1][RTW89_UKRAINE][8] = 30, + [1][1][RTW89_MEXICO][8] = 70, + [1][1][RTW89_CN][8] = 30, + [1][1][RTW89_QATAR][8] = 30, + [1][1][RTW89_FCC][9] = 66, + [1][1][RTW89_ETSI][9] = 30, + [1][1][RTW89_MKK][9] = 38, + [1][1][RTW89_IC][9] = 66, + [1][1][RTW89_KCC][9] = 44, + [1][1][RTW89_ACMA][9] = 30, + [1][1][RTW89_CHILE][9] = 48, + [1][1][RTW89_UKRAINE][9] = 30, + [1][1][RTW89_MEXICO][9] = 66, + [1][1][RTW89_CN][9] = 30, + [1][1][RTW89_QATAR][9] = 30, + [1][1][RTW89_FCC][10] = 66, + [1][1][RTW89_ETSI][10] = 30, + [1][1][RTW89_MKK][10] = 38, + [1][1][RTW89_IC][10] = 66, + [1][1][RTW89_KCC][10] = 44, + [1][1][RTW89_ACMA][10] = 30, + [1][1][RTW89_CHILE][10] = 48, + [1][1][RTW89_UKRAINE][10] = 30, + [1][1][RTW89_MEXICO][10] = 66, + [1][1][RTW89_CN][10] = 30, + [1][1][RTW89_QATAR][10] = 30, + [1][1][RTW89_FCC][11] = 60, + [1][1][RTW89_ETSI][11] = 30, + [1][1][RTW89_MKK][11] = 38, + [1][1][RTW89_IC][11] = 60, + [1][1][RTW89_KCC][11] = 44, + [1][1][RTW89_ACMA][11] = 30, + [1][1][RTW89_CHILE][11] = 48, + [1][1][RTW89_UKRAINE][11] = 30, + [1][1][RTW89_MEXICO][11] = 60, + [1][1][RTW89_CN][11] = 30, + [1][1][RTW89_QATAR][11] = 30, + [1][1][RTW89_FCC][12] = 32, + 
[1][1][RTW89_ETSI][12] = 30, + [1][1][RTW89_MKK][12] = 38, + [1][1][RTW89_IC][12] = 32, + [1][1][RTW89_KCC][12] = 44, + [1][1][RTW89_ACMA][12] = 30, + [1][1][RTW89_CHILE][12] = 32, + [1][1][RTW89_UKRAINE][12] = 30, + [1][1][RTW89_MEXICO][12] = 32, + [1][1][RTW89_CN][12] = 30, + [1][1][RTW89_QATAR][12] = 30, + [1][1][RTW89_FCC][13] = 127, + [1][1][RTW89_ETSI][13] = 127, + [1][1][RTW89_MKK][13] = 127, + [1][1][RTW89_IC][13] = 127, + [1][1][RTW89_KCC][13] = 127, + [1][1][RTW89_ACMA][13] = 127, + [1][1][RTW89_CHILE][13] = 127, + [1][1][RTW89_UKRAINE][13] = 127, + [1][1][RTW89_MEXICO][13] = 127, + [1][1][RTW89_CN][13] = 127, + [1][1][RTW89_QATAR][13] = 127, + [2][0][RTW89_FCC][0] = 76, + [2][0][RTW89_ETSI][0] = 52, + [2][0][RTW89_MKK][0] = 64, + [2][0][RTW89_IC][0] = 76, + [2][0][RTW89_KCC][0] = 70, + [2][0][RTW89_ACMA][0] = 52, + [2][0][RTW89_CHILE][0] = 60, + [2][0][RTW89_UKRAINE][0] = 52, + [2][0][RTW89_MEXICO][0] = 76, + [2][0][RTW89_CN][0] = 52, + [2][0][RTW89_QATAR][0] = 52, + [2][0][RTW89_FCC][1] = 76, + [2][0][RTW89_ETSI][1] = 52, + [2][0][RTW89_MKK][1] = 64, + [2][0][RTW89_IC][1] = 76, + [2][0][RTW89_KCC][1] = 70, + [2][0][RTW89_ACMA][1] = 52, + [2][0][RTW89_CHILE][1] = 60, + [2][0][RTW89_UKRAINE][1] = 52, + [2][0][RTW89_MEXICO][1] = 76, + [2][0][RTW89_CN][1] = 52, + [2][0][RTW89_QATAR][1] = 52, + [2][0][RTW89_FCC][2] = 78, + [2][0][RTW89_ETSI][2] = 52, + [2][0][RTW89_MKK][2] = 64, + [2][0][RTW89_IC][2] = 78, + [2][0][RTW89_KCC][2] = 70, + [2][0][RTW89_ACMA][2] = 52, + [2][0][RTW89_CHILE][2] = 60, + [2][0][RTW89_UKRAINE][2] = 52, + [2][0][RTW89_MEXICO][2] = 78, + [2][0][RTW89_CN][2] = 52, + [2][0][RTW89_QATAR][2] = 52, + [2][0][RTW89_FCC][3] = 78, + [2][0][RTW89_ETSI][3] = 52, + [2][0][RTW89_MKK][3] = 64, + [2][0][RTW89_IC][3] = 78, + [2][0][RTW89_KCC][3] = 70, + [2][0][RTW89_ACMA][3] = 52, + [2][0][RTW89_CHILE][3] = 60, + [2][0][RTW89_UKRAINE][3] = 52, + [2][0][RTW89_MEXICO][3] = 78, + [2][0][RTW89_CN][3] = 52, + [2][0][RTW89_QATAR][3] = 52, + [2][0][RTW89_FCC][4] = 78, + [2][0][RTW89_ETSI][4] = 52, + [2][0][RTW89_MKK][4] = 64, + [2][0][RTW89_IC][4] = 78, + [2][0][RTW89_KCC][4] = 70, + [2][0][RTW89_ACMA][4] = 52, + [2][0][RTW89_CHILE][4] = 60, + [2][0][RTW89_UKRAINE][4] = 52, + [2][0][RTW89_MEXICO][4] = 78, + [2][0][RTW89_CN][4] = 52, + [2][0][RTW89_QATAR][4] = 52, + [2][0][RTW89_FCC][5] = 78, + [2][0][RTW89_ETSI][5] = 52, + [2][0][RTW89_MKK][5] = 64, + [2][0][RTW89_IC][5] = 78, + [2][0][RTW89_KCC][5] = 70, + [2][0][RTW89_ACMA][5] = 52, + [2][0][RTW89_CHILE][5] = 60, + [2][0][RTW89_UKRAINE][5] = 52, + [2][0][RTW89_MEXICO][5] = 78, + [2][0][RTW89_CN][5] = 52, + [2][0][RTW89_QATAR][5] = 52, + [2][0][RTW89_FCC][6] = 78, + [2][0][RTW89_ETSI][6] = 52, + [2][0][RTW89_MKK][6] = 64, + [2][0][RTW89_IC][6] = 78, + [2][0][RTW89_KCC][6] = 70, + [2][0][RTW89_ACMA][6] = 52, + [2][0][RTW89_CHILE][6] = 60, + [2][0][RTW89_UKRAINE][6] = 52, + [2][0][RTW89_MEXICO][6] = 78, + [2][0][RTW89_CN][6] = 52, + [2][0][RTW89_QATAR][6] = 52, + [2][0][RTW89_FCC][7] = 78, + [2][0][RTW89_ETSI][7] = 52, + [2][0][RTW89_MKK][7] = 64, + [2][0][RTW89_IC][7] = 78, + [2][0][RTW89_KCC][7] = 70, + [2][0][RTW89_ACMA][7] = 52, + [2][0][RTW89_CHILE][7] = 60, + [2][0][RTW89_UKRAINE][7] = 52, + [2][0][RTW89_MEXICO][7] = 78, + [2][0][RTW89_CN][7] = 52, + [2][0][RTW89_QATAR][7] = 52, + [2][0][RTW89_FCC][8] = 78, + [2][0][RTW89_ETSI][8] = 52, + [2][0][RTW89_MKK][8] = 64, + [2][0][RTW89_IC][8] = 78, + [2][0][RTW89_KCC][8] = 70, + [2][0][RTW89_ACMA][8] = 52, + [2][0][RTW89_CHILE][8] = 60, + [2][0][RTW89_UKRAINE][8] = 52, + 
[2][0][RTW89_MEXICO][8] = 78, + [2][0][RTW89_CN][8] = 52, + [2][0][RTW89_QATAR][8] = 52, + [2][0][RTW89_FCC][9] = 76, + [2][0][RTW89_ETSI][9] = 52, + [2][0][RTW89_MKK][9] = 64, + [2][0][RTW89_IC][9] = 76, + [2][0][RTW89_KCC][9] = 70, + [2][0][RTW89_ACMA][9] = 52, + [2][0][RTW89_CHILE][9] = 60, + [2][0][RTW89_UKRAINE][9] = 52, + [2][0][RTW89_MEXICO][9] = 76, + [2][0][RTW89_CN][9] = 52, + [2][0][RTW89_QATAR][9] = 52, + [2][0][RTW89_FCC][10] = 76, + [2][0][RTW89_ETSI][10] = 52, + [2][0][RTW89_MKK][10] = 64, + [2][0][RTW89_IC][10] = 76, + [2][0][RTW89_KCC][10] = 70, + [2][0][RTW89_ACMA][10] = 52, + [2][0][RTW89_CHILE][10] = 60, + [2][0][RTW89_UKRAINE][10] = 52, + [2][0][RTW89_MEXICO][10] = 76, + [2][0][RTW89_CN][10] = 52, + [2][0][RTW89_QATAR][10] = 52, + [2][0][RTW89_FCC][11] = 68, + [2][0][RTW89_ETSI][11] = 52, + [2][0][RTW89_MKK][11] = 64, + [2][0][RTW89_IC][11] = 68, + [2][0][RTW89_KCC][11] = 70, + [2][0][RTW89_ACMA][11] = 52, + [2][0][RTW89_CHILE][11] = 60, + [2][0][RTW89_UKRAINE][11] = 52, + [2][0][RTW89_MEXICO][11] = 68, + [2][0][RTW89_CN][11] = 52, + [2][0][RTW89_QATAR][11] = 52, + [2][0][RTW89_FCC][12] = 40, + [2][0][RTW89_ETSI][12] = 52, + [2][0][RTW89_MKK][12] = 64, + [2][0][RTW89_IC][12] = 40, + [2][0][RTW89_KCC][12] = 70, + [2][0][RTW89_ACMA][12] = 52, + [2][0][RTW89_CHILE][12] = 40, + [2][0][RTW89_UKRAINE][12] = 52, + [2][0][RTW89_MEXICO][12] = 40, + [2][0][RTW89_CN][12] = 52, + [2][0][RTW89_QATAR][12] = 52, + [2][0][RTW89_FCC][13] = 127, + [2][0][RTW89_ETSI][13] = 127, + [2][0][RTW89_MKK][13] = 127, + [2][0][RTW89_IC][13] = 127, + [2][0][RTW89_KCC][13] = 127, + [2][0][RTW89_ACMA][13] = 127, + [2][0][RTW89_CHILE][13] = 127, + [2][0][RTW89_UKRAINE][13] = 127, + [2][0][RTW89_MEXICO][13] = 127, + [2][0][RTW89_CN][13] = 127, + [2][0][RTW89_QATAR][13] = 127, + [2][1][RTW89_FCC][0] = 68, + [2][1][RTW89_ETSI][0] = 40, + [2][1][RTW89_MKK][0] = 52, + [2][1][RTW89_IC][0] = 68, + [2][1][RTW89_KCC][0] = 56, + [2][1][RTW89_ACMA][0] = 40, + [2][1][RTW89_CHILE][0] = 48, + [2][1][RTW89_UKRAINE][0] = 40, + [2][1][RTW89_MEXICO][0] = 68, + [2][1][RTW89_CN][0] = 40, + [2][1][RTW89_QATAR][0] = 40, + [2][1][RTW89_FCC][1] = 68, + [2][1][RTW89_ETSI][1] = 40, + [2][1][RTW89_MKK][1] = 52, + [2][1][RTW89_IC][1] = 68, + [2][1][RTW89_KCC][1] = 56, + [2][1][RTW89_ACMA][1] = 40, + [2][1][RTW89_CHILE][1] = 48, + [2][1][RTW89_UKRAINE][1] = 40, + [2][1][RTW89_MEXICO][1] = 68, + [2][1][RTW89_CN][1] = 40, + [2][1][RTW89_QATAR][1] = 40, + [2][1][RTW89_FCC][2] = 72, + [2][1][RTW89_ETSI][2] = 40, + [2][1][RTW89_MKK][2] = 52, + [2][1][RTW89_IC][2] = 72, + [2][1][RTW89_KCC][2] = 56, + [2][1][RTW89_ACMA][2] = 40, + [2][1][RTW89_CHILE][2] = 48, + [2][1][RTW89_UKRAINE][2] = 40, + [2][1][RTW89_MEXICO][2] = 72, + [2][1][RTW89_CN][2] = 40, + [2][1][RTW89_QATAR][2] = 40, + [2][1][RTW89_FCC][3] = 76, + [2][1][RTW89_ETSI][3] = 40, + [2][1][RTW89_MKK][3] = 52, + [2][1][RTW89_IC][3] = 76, + [2][1][RTW89_KCC][3] = 56, + [2][1][RTW89_ACMA][3] = 40, + [2][1][RTW89_CHILE][3] = 48, + [2][1][RTW89_UKRAINE][3] = 40, + [2][1][RTW89_MEXICO][3] = 76, + [2][1][RTW89_CN][3] = 40, + [2][1][RTW89_QATAR][3] = 40, + [2][1][RTW89_FCC][4] = 78, + [2][1][RTW89_ETSI][4] = 40, + [2][1][RTW89_MKK][4] = 52, + [2][1][RTW89_IC][4] = 78, + [2][1][RTW89_KCC][4] = 56, + [2][1][RTW89_ACMA][4] = 40, + [2][1][RTW89_CHILE][4] = 48, + [2][1][RTW89_UKRAINE][4] = 40, + [2][1][RTW89_MEXICO][4] = 78, + [2][1][RTW89_CN][4] = 40, + [2][1][RTW89_QATAR][4] = 40, + [2][1][RTW89_FCC][5] = 78, + [2][1][RTW89_ETSI][5] = 40, + [2][1][RTW89_MKK][5] = 52, + [2][1][RTW89_IC][5] 
= 78,
+	[2][1][RTW89_KCC][5] = 56,
+	[2][1][RTW89_ACMA][5] = 40,
+	[2][1][RTW89_CHILE][5] = 48,
+	[2][1][RTW89_UKRAINE][5] = 40,
+	[2][1][RTW89_MEXICO][5] = 78,
+	[2][1][RTW89_CN][5] = 40,
+	[2][1][RTW89_QATAR][5] = 40,
+	[2][1][RTW89_FCC][6] = 78,
+	[2][1][RTW89_ETSI][6] = 40,
+	[2][1][RTW89_MKK][6] = 52,
+	[2][1][RTW89_IC][6] = 78,
+	[2][1][RTW89_KCC][6] = 56,
+	[2][1][RTW89_ACMA][6] = 40,
+	[2][1][RTW89_CHILE][6] = 48,
+	[2][1][RTW89_UKRAINE][6] = 40,
+	[2][1][RTW89_MEXICO][6] = 78,
+	[2][1][RTW89_CN][6] = 40,
+	[2][1][RTW89_QATAR][6] = 40,
+	[2][1][RTW89_FCC][7] = 78,
+	[2][1][RTW89_ETSI][7] = 40,
+	[2][1][RTW89_MKK][7] = 52,
+	[2][1][RTW89_IC][7] = 78,
+	[2][1][RTW89_KCC][7] = 56,
+	[2][1][RTW89_ACMA][7] = 40,
+	[2][1][RTW89_CHILE][7] = 48,
+	[2][1][RTW89_UKRAINE][7] = 40,
+	[2][1][RTW89_MEXICO][7] = 78,
+	[2][1][RTW89_CN][7] = 40,
+	[2][1][RTW89_QATAR][7] = 40,
+	[2][1][RTW89_FCC][8] = 74,
+	[2][1][RTW89_ETSI][8] = 40,
+	[2][1][RTW89_MKK][8] = 52,
+	[2][1][RTW89_IC][8] = 74,
+	[2][1][RTW89_KCC][8] = 56,
+	[2][1][RTW89_ACMA][8] = 40,
+	[2][1][RTW89_CHILE][8] = 48,
+	[2][1][RTW89_UKRAINE][8] = 40,
+	[2][1][RTW89_MEXICO][8] = 74,
+	[2][1][RTW89_CN][8] = 40,
+	[2][1][RTW89_QATAR][8] = 40,
+	[2][1][RTW89_FCC][9] = 70,
+	[2][1][RTW89_ETSI][9] = 40,
+	[2][1][RTW89_MKK][9] = 52,
+	[2][1][RTW89_IC][9] = 70,
+	[2][1][RTW89_KCC][9] = 56,
+	[2][1][RTW89_ACMA][9] = 40,
+	[2][1][RTW89_CHILE][9] = 48,
+	[2][1][RTW89_UKRAINE][9] = 40,
+	[2][1][RTW89_MEXICO][9] = 70,
+	[2][1][RTW89_CN][9] = 40,
+	[2][1][RTW89_QATAR][9] = 40,
+	[2][1][RTW89_FCC][10] = 70,
+	[2][1][RTW89_ETSI][10] = 40,
+	[2][1][RTW89_MKK][10] = 52,
+	[2][1][RTW89_IC][10] = 70,
+	[2][1][RTW89_KCC][10] = 56,
+	[2][1][RTW89_ACMA][10] = 40,
+	[2][1][RTW89_CHILE][10] = 48,
+	[2][1][RTW89_UKRAINE][10] = 40,
+	[2][1][RTW89_MEXICO][10] = 70,
+	[2][1][RTW89_CN][10] = 40,
+	[2][1][RTW89_QATAR][10] = 40,
+	[2][1][RTW89_FCC][11] = 48,
+	[2][1][RTW89_ETSI][11] = 40,
+	[2][1][RTW89_MKK][11] = 52,
+	[2][1][RTW89_IC][11] = 48,
+	[2][1][RTW89_KCC][11] = 56,
+	[2][1][RTW89_ACMA][11] = 40,
+	[2][1][RTW89_CHILE][11] = 48,
+	[2][1][RTW89_UKRAINE][11] = 40,
+	[2][1][RTW89_MEXICO][11] = 48,
+	[2][1][RTW89_CN][11] = 40,
+	[2][1][RTW89_QATAR][11] = 40,
+	[2][1][RTW89_FCC][12] = 26,
+	[2][1][RTW89_ETSI][12] = 40,
+	[2][1][RTW89_MKK][12] = 52,
+	[2][1][RTW89_IC][12] = 26,
+	[2][1][RTW89_KCC][12] = 56,
+	[2][1][RTW89_ACMA][12] = 40,
+	[2][1][RTW89_CHILE][12] = 26,
+	[2][1][RTW89_UKRAINE][12] = 40,
+	[2][1][RTW89_MEXICO][12] = 26,
+	[2][1][RTW89_CN][12] = 40,
+	[2][1][RTW89_QATAR][12] = 40,
+	[2][1][RTW89_FCC][13] = 127,
+	[2][1][RTW89_ETSI][13] = 127,
+	[2][1][RTW89_MKK][13] = 127,
+	[2][1][RTW89_IC][13] = 127,
+	[2][1][RTW89_KCC][13] = 127,
+	[2][1][RTW89_ACMA][13] = 127,
+	[2][1][RTW89_CHILE][13] = 127,
+	[2][1][RTW89_UKRAINE][13] = 127,
+	[2][1][RTW89_MEXICO][13] = 127,
+	[2][1][RTW89_CN][13] = 127,
+	[2][1][RTW89_QATAR][13] = 127,
 };

 const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
				     [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
-	[0][0][0][0] = 22,
-	[0][0][0][2] = 22,
-	[0][0][0][4] = 22,
-	[0][0][0][6] = 22,
-	[0][0][0][8] = 24,
-	[0][0][0][10] = 24,
-	[0][0][0][12] = 24,
-	[0][0][0][14] = 24,
-	[0][0][0][15] = 24,
-	[0][0][0][17] = 24,
-	[0][0][0][19] = 24,
-	[0][0][0][21] = 24,
-	[0][0][0][23] = 24,
-	[0][0][0][25] = 24,
-	[0][0][0][27] = 24,
-	[0][0][0][29] = 24,
-	[0][0][0][31] = 24,
-	[0][0][0][33] = 24,
-	[0][0][0][35] = 24,
-	[0][0][0][37] = 24,
-	[0][0][0][38] = 28,
-	[0][0][0][40] = 28,
-	[0][0][0][42] = 28,
-	[0][0][0][44] = 28,
-
[0][0][0][46] = 28, - [0][1][0][0] = 8, - [0][1][0][2] = 8, - [0][1][0][4] = 8, - [0][1][0][6] = 8, - [0][1][0][8] = 12, - [0][1][0][10] = 12, - [0][1][0][12] = 12, - [0][1][0][14] = 12, - [0][1][0][15] = 12, - [0][1][0][17] = 12, - [0][1][0][19] = 12, - [0][1][0][21] = 12, - [0][1][0][23] = 12, - [0][1][0][25] = 12, - [0][1][0][27] = 12, - [0][1][0][29] = 12, - [0][1][0][31] = 12, - [0][1][0][33] = 12, - [0][1][0][35] = 12, - [0][1][0][37] = 12, - [0][1][0][38] = 16, - [0][1][0][40] = 16, - [0][1][0][42] = 16, - [0][1][0][44] = 16, - [0][1][0][46] = 16, - [1][0][0][0] = 30, - [1][0][0][2] = 30, - [1][0][0][4] = 30, - [1][0][0][6] = 30, - [1][0][0][8] = 36, - [1][0][0][10] = 36, - [1][0][0][12] = 36, - [1][0][0][14] = 36, - [1][0][0][15] = 36, - [1][0][0][17] = 36, - [1][0][0][19] = 36, - [1][0][0][21] = 36, - [1][0][0][23] = 36, - [1][0][0][25] = 36, - [1][0][0][27] = 36, - [1][0][0][29] = 36, - [1][0][0][31] = 36, - [1][0][0][33] = 36, - [1][0][0][35] = 36, - [1][0][0][37] = 36, - [1][0][0][38] = 28, - [1][0][0][40] = 28, - [1][0][0][42] = 28, - [1][0][0][44] = 28, - [1][0][0][46] = 28, - [1][1][0][0] = 18, - [1][1][0][2] = 18, - [1][1][0][4] = 18, - [1][1][0][6] = 18, - [1][1][0][8] = 22, - [1][1][0][10] = 22, - [1][1][0][12] = 22, - [1][1][0][14] = 22, - [1][1][0][15] = 22, - [1][1][0][17] = 22, - [1][1][0][19] = 22, - [1][1][0][21] = 22, - [1][1][0][23] = 22, - [1][1][0][25] = 22, - [1][1][0][27] = 22, - [1][1][0][29] = 22, - [1][1][0][31] = 22, - [1][1][0][33] = 22, - [1][1][0][35] = 22, - [1][1][0][37] = 22, - [1][1][0][38] = 16, - [1][1][0][40] = 16, - [1][1][0][42] = 16, - [1][1][0][44] = 16, - [1][1][0][46] = 16, - [2][0][0][0] = 30, - [2][0][0][2] = 30, - [2][0][0][4] = 30, - [2][0][0][6] = 30, - [2][0][0][8] = 46, - [2][0][0][10] = 46, - [2][0][0][12] = 46, - [2][0][0][14] = 46, - [2][0][0][15] = 46, - [2][0][0][17] = 46, - [2][0][0][19] = 46, - [2][0][0][21] = 46, - [2][0][0][23] = 46, - [2][0][0][25] = 46, - [2][0][0][27] = 46, - [2][0][0][29] = 46, - [2][0][0][31] = 46, - [2][0][0][33] = 46, - [2][0][0][35] = 46, - [2][0][0][37] = 46, - [2][0][0][38] = 28, - [2][0][0][40] = 28, - [2][0][0][42] = 28, - [2][0][0][44] = 28, - [2][0][0][46] = 28, - [2][1][0][0] = 18, - [2][1][0][2] = 18, - [2][1][0][4] = 18, - [2][1][0][6] = 18, - [2][1][0][8] = 32, - [2][1][0][10] = 32, - [2][1][0][12] = 32, - [2][1][0][14] = 32, - [2][1][0][15] = 32, - [2][1][0][17] = 32, - [2][1][0][19] = 32, - [2][1][0][21] = 32, - [2][1][0][23] = 32, - [2][1][0][25] = 32, - [2][1][0][27] = 32, - [2][1][0][29] = 32, - [2][1][0][31] = 32, - [2][1][0][33] = 32, - [2][1][0][35] = 32, - [2][1][0][37] = 32, - [2][1][0][38] = 16, - [2][1][0][40] = 16, - [2][1][0][42] = 16, - [2][1][0][44] = 16, - [2][1][0][46] = 16, - [0][0][2][0] = 48, - [0][0][1][0] = 24, - [0][0][3][0] = 26, - [0][0][5][0] = 22, - [0][0][6][0] = 24, - [0][0][9][0] = 24, - [0][0][8][0] = 30, - [0][0][11][0] = 24, - [0][0][2][2] = 48, - [0][0][1][2] = 24, - [0][0][3][2] = 26, - [0][0][5][2] = 22, - [0][0][6][2] = 24, - [0][0][9][2] = 24, - [0][0][8][2] = 30, - [0][0][11][2] = 24, - [0][0][2][4] = 48, - [0][0][1][4] = 24, - [0][0][3][4] = 26, - [0][0][5][4] = 22, - [0][0][6][4] = 24, - [0][0][9][4] = 24, - [0][0][8][4] = 30, - [0][0][11][4] = 24, - [0][0][2][6] = 48, - [0][0][1][6] = 24, - [0][0][3][6] = 26, - [0][0][5][6] = 22, - [0][0][6][6] = 24, - [0][0][9][6] = 24, - [0][0][8][6] = 30, - [0][0][11][6] = 24, - [0][0][2][8] = 48, - [0][0][1][8] = 24, - [0][0][3][8] = 26, - [0][0][5][8] = 48, - [0][0][6][8] = 24, - [0][0][9][8] = 24, - 
[0][0][8][8] = 54, - [0][0][11][8] = 24, - [0][0][2][10] = 48, - [0][0][1][10] = 24, - [0][0][3][10] = 26, - [0][0][5][10] = 48, - [0][0][6][10] = 24, - [0][0][9][10] = 24, - [0][0][8][10] = 54, - [0][0][11][10] = 24, - [0][0][2][12] = 48, - [0][0][1][12] = 24, - [0][0][3][12] = 26, - [0][0][5][12] = 48, - [0][0][6][12] = 24, - [0][0][9][12] = 24, - [0][0][8][12] = 54, - [0][0][11][12] = 24, - [0][0][2][14] = 48, - [0][0][1][14] = 24, - [0][0][3][14] = 26, - [0][0][5][14] = 48, - [0][0][6][14] = 24, - [0][0][9][14] = 24, - [0][0][8][14] = 54, - [0][0][11][14] = 24, - [0][0][2][15] = 48, - [0][0][1][15] = 24, - [0][0][3][15] = 44, - [0][0][5][15] = 48, - [0][0][6][15] = 24, - [0][0][9][15] = 24, - [0][0][8][15] = 54, - [0][0][11][15] = 24, - [0][0][2][17] = 48, - [0][0][1][17] = 24, - [0][0][3][17] = 44, - [0][0][5][17] = 48, - [0][0][6][17] = 24, - [0][0][9][17] = 24, - [0][0][8][17] = 54, - [0][0][11][17] = 24, - [0][0][2][19] = 48, - [0][0][1][19] = 24, - [0][0][3][19] = 44, - [0][0][5][19] = 48, - [0][0][6][19] = 24, - [0][0][9][19] = 24, - [0][0][8][19] = 54, - [0][0][11][19] = 24, - [0][0][2][21] = 48, - [0][0][1][21] = 24, - [0][0][3][21] = 44, - [0][0][5][21] = 48, - [0][0][6][21] = 24, - [0][0][9][21] = 24, - [0][0][8][21] = 54, - [0][0][11][21] = 24, - [0][0][2][23] = 48, - [0][0][1][23] = 24, - [0][0][3][23] = 44, - [0][0][5][23] = 48, - [0][0][6][23] = 24, - [0][0][9][23] = 24, - [0][0][8][23] = 54, - [0][0][11][23] = 24, - [0][0][2][25] = 48, - [0][0][1][25] = 24, - [0][0][3][25] = 44, - [0][0][5][25] = 127, - [0][0][6][25] = 24, - [0][0][9][25] = 127, - [0][0][8][25] = 54, - [0][0][11][25] = 24, - [0][0][2][27] = 48, - [0][0][1][27] = 24, - [0][0][3][27] = 44, - [0][0][5][27] = 127, - [0][0][6][27] = 24, - [0][0][9][27] = 127, - [0][0][8][27] = 54, - [0][0][11][27] = 24, - [0][0][2][29] = 48, - [0][0][1][29] = 24, - [0][0][3][29] = 44, - [0][0][5][29] = 127, - [0][0][6][29] = 24, - [0][0][9][29] = 127, - [0][0][8][29] = 54, - [0][0][11][29] = 24, - [0][0][2][31] = 48, - [0][0][1][31] = 24, - [0][0][3][31] = 44, - [0][0][5][31] = 48, - [0][0][6][31] = 24, - [0][0][9][31] = 24, - [0][0][8][31] = 54, - [0][0][11][31] = 24, - [0][0][2][33] = 48, - [0][0][1][33] = 24, - [0][0][3][33] = 44, - [0][0][5][33] = 48, - [0][0][6][33] = 24, - [0][0][9][33] = 24, - [0][0][8][33] = 54, - [0][0][11][33] = 24, - [0][0][2][35] = 48, - [0][0][1][35] = 24, - [0][0][3][35] = 44, - [0][0][5][35] = 48, - [0][0][6][35] = 24, - [0][0][9][35] = 24, - [0][0][8][35] = 54, - [0][0][11][35] = 24, - [0][0][2][37] = 48, - [0][0][1][37] = 127, - [0][0][3][37] = 44, - [0][0][5][37] = 48, - [0][0][6][37] = 24, - [0][0][9][37] = 48, - [0][0][8][37] = 54, - [0][0][11][37] = 127, - [0][0][2][38] = 76, - [0][0][1][38] = 28, - [0][0][3][38] = 127, - [0][0][5][38] = 76, - [0][0][6][38] = 28, - [0][0][9][38] = 76, - [0][0][8][38] = 54, - [0][0][11][38] = 28, - [0][0][2][40] = 76, - [0][0][1][40] = 28, - [0][0][3][40] = 127, - [0][0][5][40] = 76, - [0][0][6][40] = 28, - [0][0][9][40] = 76, - [0][0][8][40] = 54, - [0][0][11][40] = 28, - [0][0][2][42] = 76, - [0][0][1][42] = 28, - [0][0][3][42] = 127, - [0][0][5][42] = 76, - [0][0][6][42] = 28, - [0][0][9][42] = 76, - [0][0][8][42] = 54, - [0][0][11][42] = 28, - [0][0][2][44] = 76, - [0][0][1][44] = 28, - [0][0][3][44] = 127, - [0][0][5][44] = 76, - [0][0][6][44] = 28, - [0][0][9][44] = 76, - [0][0][8][44] = 54, - [0][0][11][44] = 28, - [0][0][2][46] = 76, - [0][0][1][46] = 28, - [0][0][3][46] = 127, - [0][0][5][46] = 76, - [0][0][6][46] = 28, - [0][0][9][46] = 76, - 
[0][0][8][46] = 54, - [0][0][11][46] = 28, - [0][1][2][0] = 36, - [0][1][1][0] = 12, - [0][1][3][0] = 14, - [0][1][5][0] = 8, - [0][1][6][0] = 12, - [0][1][9][0] = 12, - [0][1][8][0] = 18, - [0][1][11][0] = 12, - [0][1][2][2] = 36, - [0][1][1][2] = 12, - [0][1][3][2] = 14, - [0][1][5][2] = 8, - [0][1][6][2] = 12, - [0][1][9][2] = 12, - [0][1][8][2] = 18, - [0][1][11][2] = 12, - [0][1][2][4] = 36, - [0][1][1][4] = 12, - [0][1][3][4] = 14, - [0][1][5][4] = 8, - [0][1][6][4] = 12, - [0][1][9][4] = 12, - [0][1][8][4] = 18, - [0][1][11][4] = 12, - [0][1][2][6] = 36, - [0][1][1][6] = 12, - [0][1][3][6] = 14, - [0][1][5][6] = 8, - [0][1][6][6] = 12, - [0][1][9][6] = 12, - [0][1][8][6] = 18, - [0][1][11][6] = 12, - [0][1][2][8] = 36, - [0][1][1][8] = 12, - [0][1][3][8] = 14, - [0][1][5][8] = 36, - [0][1][6][8] = 12, - [0][1][9][8] = 12, - [0][1][8][8] = 42, - [0][1][11][8] = 12, - [0][1][2][10] = 36, - [0][1][1][10] = 12, - [0][1][3][10] = 14, - [0][1][5][10] = 36, - [0][1][6][10] = 12, - [0][1][9][10] = 12, - [0][1][8][10] = 42, - [0][1][11][10] = 12, - [0][1][2][12] = 36, - [0][1][1][12] = 12, - [0][1][3][12] = 14, - [0][1][5][12] = 36, - [0][1][6][12] = 12, - [0][1][9][12] = 12, - [0][1][8][12] = 42, - [0][1][11][12] = 12, - [0][1][2][14] = 36, - [0][1][1][14] = 12, - [0][1][3][14] = 14, - [0][1][5][14] = 36, - [0][1][6][14] = 12, - [0][1][9][14] = 12, - [0][1][8][14] = 42, - [0][1][11][14] = 12, - [0][1][2][15] = 36, - [0][1][1][15] = 12, - [0][1][3][15] = 32, - [0][1][5][15] = 36, - [0][1][6][15] = 12, - [0][1][9][15] = 12, - [0][1][8][15] = 42, - [0][1][11][15] = 12, - [0][1][2][17] = 36, - [0][1][1][17] = 12, - [0][1][3][17] = 32, - [0][1][5][17] = 36, - [0][1][6][17] = 12, - [0][1][9][17] = 12, - [0][1][8][17] = 42, - [0][1][11][17] = 12, - [0][1][2][19] = 36, - [0][1][1][19] = 12, - [0][1][3][19] = 32, - [0][1][5][19] = 36, - [0][1][6][19] = 12, - [0][1][9][19] = 12, - [0][1][8][19] = 42, - [0][1][11][19] = 12, - [0][1][2][21] = 36, - [0][1][1][21] = 12, - [0][1][3][21] = 32, - [0][1][5][21] = 36, - [0][1][6][21] = 12, - [0][1][9][21] = 12, - [0][1][8][21] = 42, - [0][1][11][21] = 12, - [0][1][2][23] = 36, - [0][1][1][23] = 12, - [0][1][3][23] = 32, - [0][1][5][23] = 36, - [0][1][6][23] = 12, - [0][1][9][23] = 12, - [0][1][8][23] = 42, - [0][1][11][23] = 12, - [0][1][2][25] = 36, - [0][1][1][25] = 12, - [0][1][3][25] = 32, - [0][1][5][25] = 127, - [0][1][6][25] = 12, - [0][1][9][25] = 127, - [0][1][8][25] = 42, - [0][1][11][25] = 12, - [0][1][2][27] = 36, - [0][1][1][27] = 12, - [0][1][3][27] = 32, - [0][1][5][27] = 127, - [0][1][6][27] = 12, - [0][1][9][27] = 127, - [0][1][8][27] = 42, - [0][1][11][27] = 12, - [0][1][2][29] = 36, - [0][1][1][29] = 12, - [0][1][3][29] = 32, - [0][1][5][29] = 127, - [0][1][6][29] = 12, - [0][1][9][29] = 127, - [0][1][8][29] = 42, - [0][1][11][29] = 12, - [0][1][2][31] = 36, - [0][1][1][31] = 12, - [0][1][3][31] = 32, - [0][1][5][31] = 36, - [0][1][6][31] = 12, - [0][1][9][31] = 12, - [0][1][8][31] = 42, - [0][1][11][31] = 12, - [0][1][2][33] = 36, - [0][1][1][33] = 12, - [0][1][3][33] = 32, - [0][1][5][33] = 36, - [0][1][6][33] = 12, - [0][1][9][33] = 12, - [0][1][8][33] = 42, - [0][1][11][33] = 12, - [0][1][2][35] = 36, - [0][1][1][35] = 12, - [0][1][3][35] = 32, - [0][1][5][35] = 36, - [0][1][6][35] = 12, - [0][1][9][35] = 12, - [0][1][8][35] = 42, - [0][1][11][35] = 12, - [0][1][2][37] = 36, - [0][1][1][37] = 127, - [0][1][3][37] = 32, - [0][1][5][37] = 36, - [0][1][6][37] = 12, - [0][1][9][37] = 36, - [0][1][8][37] = 42, - [0][1][11][37] = 127, - 
[0][1][2][38] = 72, - [0][1][1][38] = 16, - [0][1][3][38] = 127, - [0][1][5][38] = 72, - [0][1][6][38] = 16, - [0][1][9][38] = 76, - [0][1][8][38] = 42, - [0][1][11][38] = 16, - [0][1][2][40] = 76, - [0][1][1][40] = 16, - [0][1][3][40] = 127, - [0][1][5][40] = 76, - [0][1][6][40] = 16, - [0][1][9][40] = 76, - [0][1][8][40] = 42, - [0][1][11][40] = 16, - [0][1][2][42] = 76, - [0][1][1][42] = 16, - [0][1][3][42] = 127, - [0][1][5][42] = 76, - [0][1][6][42] = 16, - [0][1][9][42] = 76, - [0][1][8][42] = 42, - [0][1][11][42] = 16, - [0][1][2][44] = 76, - [0][1][1][44] = 16, - [0][1][3][44] = 127, - [0][1][5][44] = 76, - [0][1][6][44] = 16, - [0][1][9][44] = 76, - [0][1][8][44] = 42, - [0][1][11][44] = 16, - [0][1][2][46] = 76, - [0][1][1][46] = 16, - [0][1][3][46] = 127, - [0][1][5][46] = 76, - [0][1][6][46] = 16, - [0][1][9][46] = 76, - [0][1][8][46] = 42, - [0][1][11][46] = 16, - [1][0][2][0] = 62, - [1][0][1][0] = 36, - [1][0][3][0] = 36, - [1][0][5][0] = 34, - [1][0][6][0] = 36, - [1][0][9][0] = 36, - [1][0][8][0] = 30, - [1][0][11][0] = 36, - [1][0][2][2] = 62, - [1][0][1][2] = 36, - [1][0][3][2] = 36, - [1][0][5][2] = 34, - [1][0][6][2] = 36, - [1][0][9][2] = 36, - [1][0][8][2] = 30, - [1][0][11][2] = 36, - [1][0][2][4] = 62, - [1][0][1][4] = 36, - [1][0][3][4] = 36, - [1][0][5][4] = 34, - [1][0][6][4] = 36, - [1][0][9][4] = 36, - [1][0][8][4] = 30, - [1][0][11][4] = 36, - [1][0][2][6] = 62, - [1][0][1][6] = 36, - [1][0][3][6] = 36, - [1][0][5][6] = 34, - [1][0][6][6] = 36, - [1][0][9][6] = 36, - [1][0][8][6] = 30, - [1][0][11][6] = 36, - [1][0][2][8] = 62, - [1][0][1][8] = 36, - [1][0][3][8] = 36, - [1][0][5][8] = 62, - [1][0][6][8] = 36, - [1][0][9][8] = 36, - [1][0][8][8] = 54, - [1][0][11][8] = 36, - [1][0][2][10] = 62, - [1][0][1][10] = 36, - [1][0][3][10] = 36, - [1][0][5][10] = 62, - [1][0][6][10] = 36, - [1][0][9][10] = 36, - [1][0][8][10] = 54, - [1][0][11][10] = 36, - [1][0][2][12] = 62, - [1][0][1][12] = 36, - [1][0][3][12] = 36, - [1][0][5][12] = 62, - [1][0][6][12] = 36, - [1][0][9][12] = 36, - [1][0][8][12] = 54, - [1][0][11][12] = 36, - [1][0][2][14] = 62, - [1][0][1][14] = 36, - [1][0][3][14] = 36, - [1][0][5][14] = 62, - [1][0][6][14] = 36, - [1][0][9][14] = 36, - [1][0][8][14] = 54, - [1][0][11][14] = 36, - [1][0][2][15] = 62, - [1][0][1][15] = 36, - [1][0][3][15] = 58, - [1][0][5][15] = 62, - [1][0][6][15] = 36, - [1][0][9][15] = 36, - [1][0][8][15] = 54, - [1][0][11][15] = 36, - [1][0][2][17] = 62, - [1][0][1][17] = 36, - [1][0][3][17] = 58, - [1][0][5][17] = 62, - [1][0][6][17] = 36, - [1][0][9][17] = 36, - [1][0][8][17] = 54, - [1][0][11][17] = 36, - [1][0][2][19] = 62, - [1][0][1][19] = 36, - [1][0][3][19] = 58, - [1][0][5][19] = 62, - [1][0][6][19] = 36, - [1][0][9][19] = 36, - [1][0][8][19] = 54, - [1][0][11][19] = 36, - [1][0][2][21] = 62, - [1][0][1][21] = 36, - [1][0][3][21] = 58, - [1][0][5][21] = 62, - [1][0][6][21] = 36, - [1][0][9][21] = 36, - [1][0][8][21] = 54, - [1][0][11][21] = 36, - [1][0][2][23] = 62, - [1][0][1][23] = 36, - [1][0][3][23] = 58, - [1][0][5][23] = 62, - [1][0][6][23] = 36, - [1][0][9][23] = 36, - [1][0][8][23] = 54, - [1][0][11][23] = 36, - [1][0][2][25] = 62, - [1][0][1][25] = 36, - [1][0][3][25] = 58, - [1][0][5][25] = 127, - [1][0][6][25] = 36, - [1][0][9][25] = 127, - [1][0][8][25] = 54, - [1][0][11][25] = 36, - [1][0][2][27] = 62, - [1][0][1][27] = 36, - [1][0][3][27] = 58, - [1][0][5][27] = 127, - [1][0][6][27] = 36, - [1][0][9][27] = 127, - [1][0][8][27] = 54, - [1][0][11][27] = 36, - [1][0][2][29] = 62, - [1][0][1][29] = 36, - 
[1][0][3][29] = 58, - [1][0][5][29] = 127, - [1][0][6][29] = 36, - [1][0][9][29] = 127, - [1][0][8][29] = 54, - [1][0][11][29] = 36, - [1][0][2][31] = 62, - [1][0][1][31] = 36, - [1][0][3][31] = 58, - [1][0][5][31] = 62, - [1][0][6][31] = 36, - [1][0][9][31] = 36, - [1][0][8][31] = 54, - [1][0][11][31] = 36, - [1][0][2][33] = 62, - [1][0][1][33] = 36, - [1][0][3][33] = 58, - [1][0][5][33] = 62, - [1][0][6][33] = 36, - [1][0][9][33] = 36, - [1][0][8][33] = 54, - [1][0][11][33] = 36, - [1][0][2][35] = 62, - [1][0][1][35] = 36, - [1][0][3][35] = 58, - [1][0][5][35] = 62, - [1][0][6][35] = 36, - [1][0][9][35] = 36, - [1][0][8][35] = 54, - [1][0][11][35] = 36, - [1][0][2][37] = 56, - [1][0][1][37] = 62, - [1][0][3][37] = 127, - [1][0][5][37] = 58, - [1][0][6][37] = 62, - [1][0][9][37] = 36, - [1][0][8][37] = 62, - [1][0][11][37] = 54, - [1][0][2][38] = 76, - [1][0][1][38] = 28, - [1][0][3][38] = 127, - [1][0][5][38] = 76, - [1][0][6][38] = 28, - [1][0][9][38] = 76, - [1][0][8][38] = 54, - [1][0][11][38] = 28, - [1][0][2][40] = 76, - [1][0][1][40] = 28, - [1][0][3][40] = 127, - [1][0][5][40] = 76, - [1][0][6][40] = 28, - [1][0][9][40] = 76, - [1][0][8][40] = 54, - [1][0][11][40] = 28, - [1][0][2][42] = 76, - [1][0][1][42] = 28, - [1][0][3][42] = 127, - [1][0][5][42] = 76, - [1][0][6][42] = 28, - [1][0][9][42] = 76, - [1][0][8][42] = 54, - [1][0][11][42] = 28, - [1][0][2][44] = 76, - [1][0][1][44] = 28, - [1][0][3][44] = 127, - [1][0][5][44] = 76, - [1][0][6][44] = 28, - [1][0][9][44] = 76, - [1][0][8][44] = 54, - [1][0][11][44] = 28, - [1][0][2][46] = 76, - [1][0][1][46] = 28, - [1][0][3][46] = 127, - [1][0][5][46] = 76, - [1][0][6][46] = 28, - [1][0][9][46] = 76, - [1][0][8][46] = 54, - [1][0][11][46] = 28, - [1][1][2][0] = 46, - [1][1][1][0] = 22, - [1][1][3][0] = 24, - [1][1][5][0] = 18, - [1][1][6][0] = 22, - [1][1][9][0] = 22, - [1][1][8][0] = 18, - [1][1][11][0] = 22, - [1][1][2][2] = 46, - [1][1][1][2] = 22, - [1][1][3][2] = 24, - [1][1][5][2] = 18, - [1][1][6][2] = 22, - [1][1][9][2] = 22, - [1][1][8][2] = 18, - [1][1][11][2] = 22, - [1][1][2][4] = 46, - [1][1][1][4] = 22, - [1][1][3][4] = 24, - [1][1][5][4] = 18, - [1][1][6][4] = 22, - [1][1][9][4] = 22, - [1][1][8][4] = 18, - [1][1][11][4] = 22, - [1][1][2][6] = 46, - [1][1][1][6] = 22, - [1][1][3][6] = 24, - [1][1][5][6] = 18, - [1][1][6][6] = 22, - [1][1][9][6] = 22, - [1][1][8][6] = 18, - [1][1][11][6] = 22, - [1][1][2][8] = 46, - [1][1][1][8] = 22, - [1][1][3][8] = 24, - [1][1][5][8] = 46, - [1][1][6][8] = 22, - [1][1][9][8] = 22, - [1][1][8][8] = 42, - [1][1][11][8] = 22, - [1][1][2][10] = 46, - [1][1][1][10] = 22, - [1][1][3][10] = 24, - [1][1][5][10] = 46, - [1][1][6][10] = 22, - [1][1][9][10] = 22, - [1][1][8][10] = 42, - [1][1][11][10] = 22, - [1][1][2][12] = 46, - [1][1][1][12] = 22, - [1][1][3][12] = 24, - [1][1][5][12] = 46, - [1][1][6][12] = 22, - [1][1][9][12] = 22, - [1][1][8][12] = 42, - [1][1][11][12] = 22, - [1][1][2][14] = 46, - [1][1][1][14] = 22, - [1][1][3][14] = 24, - [1][1][5][14] = 46, - [1][1][6][14] = 22, - [1][1][9][14] = 22, - [1][1][8][14] = 42, - [1][1][11][14] = 22, - [1][1][2][15] = 46, - [1][1][1][15] = 22, - [1][1][3][15] = 46, - [1][1][5][15] = 46, - [1][1][6][15] = 22, - [1][1][9][15] = 22, - [1][1][8][15] = 42, - [1][1][11][15] = 22, - [1][1][2][17] = 46, - [1][1][1][17] = 22, - [1][1][3][17] = 46, - [1][1][5][17] = 46, - [1][1][6][17] = 22, - [1][1][9][17] = 22, - [1][1][8][17] = 42, - [1][1][11][17] = 22, - [1][1][2][19] = 46, - [1][1][1][19] = 22, - [1][1][3][19] = 46, - [1][1][5][19] = 46, - 
[1][1][6][19] = 22, - [1][1][9][19] = 22, - [1][1][8][19] = 42, - [1][1][11][19] = 22, - [1][1][2][21] = 46, - [1][1][1][21] = 22, - [1][1][3][21] = 46, - [1][1][5][21] = 46, - [1][1][6][21] = 22, - [1][1][9][21] = 22, - [1][1][8][21] = 42, - [1][1][11][21] = 22, - [1][1][2][23] = 46, - [1][1][1][23] = 22, - [1][1][3][23] = 46, - [1][1][5][23] = 46, - [1][1][6][23] = 22, - [1][1][9][23] = 22, - [1][1][8][23] = 42, - [1][1][11][23] = 22, - [1][1][2][25] = 46, - [1][1][1][25] = 22, - [1][1][3][25] = 46, - [1][1][5][25] = 127, - [1][1][6][25] = 22, - [1][1][9][25] = 127, - [1][1][8][25] = 42, - [1][1][11][25] = 22, - [1][1][2][27] = 46, - [1][1][1][27] = 22, - [1][1][3][27] = 46, - [1][1][5][27] = 127, - [1][1][6][27] = 22, - [1][1][9][27] = 127, - [1][1][8][27] = 42, - [1][1][11][27] = 22, - [1][1][2][29] = 46, - [1][1][1][29] = 22, - [1][1][3][29] = 46, - [1][1][5][29] = 127, - [1][1][6][29] = 22, - [1][1][9][29] = 127, - [1][1][8][29] = 42, - [1][1][11][29] = 22, - [1][1][2][31] = 46, - [1][1][1][31] = 22, - [1][1][3][31] = 46, - [1][1][5][31] = 46, - [1][1][6][31] = 22, - [1][1][9][31] = 22, - [1][1][8][31] = 42, - [1][1][11][31] = 22, - [1][1][2][33] = 46, - [1][1][1][33] = 22, - [1][1][3][33] = 46, - [1][1][5][33] = 46, - [1][1][6][33] = 22, - [1][1][9][33] = 22, - [1][1][8][33] = 42, - [1][1][11][33] = 22, - [1][1][2][35] = 46, - [1][1][1][35] = 22, - [1][1][3][35] = 46, - [1][1][5][35] = 46, - [1][1][6][35] = 22, - [1][1][9][35] = 22, - [1][1][8][35] = 42, - [1][1][11][35] = 22, - [1][1][2][37] = 46, - [1][1][1][37] = 127, - [1][1][3][37] = 46, - [1][1][5][37] = 46, - [1][1][6][37] = 22, - [1][1][9][37] = 50, - [1][1][8][37] = 42, - [1][1][11][37] = 127, - [1][1][2][38] = 74, - [1][1][1][38] = 16, - [1][1][3][38] = 127, - [1][1][5][38] = 74, - [1][1][6][38] = 16, - [1][1][9][38] = 76, - [1][1][8][38] = 42, - [1][1][11][38] = 16, - [1][1][2][40] = 76, - [1][1][1][40] = 16, - [1][1][3][40] = 127, - [1][1][5][40] = 76, - [1][1][6][40] = 16, - [1][1][9][40] = 76, - [1][1][8][40] = 42, - [1][1][11][40] = 16, - [1][1][2][42] = 76, - [1][1][1][42] = 16, - [1][1][3][42] = 127, - [1][1][5][42] = 76, - [1][1][6][42] = 16, - [1][1][9][42] = 76, - [1][1][8][42] = 42, - [1][1][11][42] = 16, - [1][1][2][44] = 76, - [1][1][1][44] = 16, - [1][1][3][44] = 127, - [1][1][5][44] = 76, - [1][1][6][44] = 16, - [1][1][9][44] = 76, - [1][1][8][44] = 42, - [1][1][11][44] = 16, - [1][1][2][46] = 76, - [1][1][1][46] = 16, - [1][1][3][46] = 127, - [1][1][5][46] = 76, - [1][1][6][46] = 16, - [1][1][9][46] = 76, - [1][1][8][46] = 42, - [1][1][11][46] = 16, - [2][0][2][0] = 74, - [2][0][1][0] = 46, - [2][0][3][0] = 50, - [2][0][5][0] = 46, - [2][0][6][0] = 46, - [2][0][9][0] = 46, - [2][0][8][0] = 30, - [2][0][11][0] = 46, - [2][0][2][2] = 74, - [2][0][1][2] = 46, - [2][0][3][2] = 50, - [2][0][5][2] = 46, - [2][0][6][2] = 46, - [2][0][9][2] = 46, - [2][0][8][2] = 30, - [2][0][11][2] = 46, - [2][0][2][4] = 74, - [2][0][1][4] = 46, - [2][0][3][4] = 50, - [2][0][5][4] = 46, - [2][0][6][4] = 46, - [2][0][9][4] = 46, - [2][0][8][4] = 30, - [2][0][11][4] = 46, - [2][0][2][6] = 74, - [2][0][1][6] = 46, - [2][0][3][6] = 50, - [2][0][5][6] = 46, - [2][0][6][6] = 46, - [2][0][9][6] = 46, - [2][0][8][6] = 30, - [2][0][11][6] = 46, - [2][0][2][8] = 74, - [2][0][1][8] = 46, - [2][0][3][8] = 50, - [2][0][5][8] = 66, - [2][0][6][8] = 46, - [2][0][9][8] = 46, - [2][0][8][8] = 54, - [2][0][11][8] = 46, - [2][0][2][10] = 74, - [2][0][1][10] = 46, - [2][0][3][10] = 50, - [2][0][5][10] = 66, - [2][0][6][10] = 46, - [2][0][9][10] = 46, 
- [2][0][8][10] = 54, - [2][0][11][10] = 46, - [2][0][2][12] = 74, - [2][0][1][12] = 46, - [2][0][3][12] = 50, - [2][0][5][12] = 66, - [2][0][6][12] = 46, - [2][0][9][12] = 46, - [2][0][8][12] = 54, - [2][0][11][12] = 46, - [2][0][2][14] = 74, - [2][0][1][14] = 46, - [2][0][3][14] = 50, - [2][0][5][14] = 66, - [2][0][6][14] = 46, - [2][0][9][14] = 46, - [2][0][8][14] = 54, - [2][0][11][14] = 46, - [2][0][2][15] = 74, - [2][0][1][15] = 46, - [2][0][3][15] = 70, - [2][0][5][15] = 74, - [2][0][6][15] = 46, - [2][0][9][15] = 46, - [2][0][8][15] = 54, - [2][0][11][15] = 46, - [2][0][2][17] = 74, - [2][0][1][17] = 46, - [2][0][3][17] = 70, - [2][0][5][17] = 74, - [2][0][6][17] = 46, - [2][0][9][17] = 46, - [2][0][8][17] = 54, - [2][0][11][17] = 46, - [2][0][2][19] = 74, - [2][0][1][19] = 46, - [2][0][3][19] = 70, - [2][0][5][19] = 74, - [2][0][6][19] = 46, - [2][0][9][19] = 46, - [2][0][8][19] = 54, - [2][0][11][19] = 46, - [2][0][2][21] = 74, - [2][0][1][21] = 46, - [2][0][3][21] = 70, - [2][0][5][21] = 74, - [2][0][6][21] = 46, - [2][0][9][21] = 46, - [2][0][8][21] = 54, - [2][0][11][21] = 46, - [2][0][2][23] = 74, - [2][0][1][23] = 46, - [2][0][3][23] = 70, - [2][0][5][23] = 74, - [2][0][6][23] = 46, - [2][0][9][23] = 46, - [2][0][8][23] = 54, - [2][0][11][23] = 46, - [2][0][2][25] = 74, - [2][0][1][25] = 46, - [2][0][3][25] = 70, - [2][0][5][25] = 127, - [2][0][6][25] = 46, - [2][0][9][25] = 127, - [2][0][8][25] = 54, - [2][0][11][25] = 46, - [2][0][2][27] = 74, - [2][0][1][27] = 46, - [2][0][3][27] = 70, - [2][0][5][27] = 127, - [2][0][6][27] = 46, - [2][0][9][27] = 127, - [2][0][8][27] = 54, - [2][0][11][27] = 46, - [2][0][2][29] = 74, - [2][0][1][29] = 46, - [2][0][3][29] = 70, - [2][0][5][29] = 127, - [2][0][6][29] = 46, - [2][0][9][29] = 127, - [2][0][8][29] = 54, - [2][0][11][29] = 46, - [2][0][2][31] = 74, - [2][0][1][31] = 46, - [2][0][3][31] = 70, - [2][0][5][31] = 74, - [2][0][6][31] = 46, - [2][0][9][31] = 46, - [2][0][8][31] = 54, - [2][0][11][31] = 46, - [2][0][2][33] = 74, - [2][0][1][33] = 46, - [2][0][3][33] = 70, - [2][0][5][33] = 74, - [2][0][6][33] = 46, - [2][0][9][33] = 46, - [2][0][8][33] = 54, - [2][0][11][33] = 46, - [2][0][2][35] = 74, - [2][0][1][35] = 46, - [2][0][3][35] = 70, - [2][0][5][35] = 74, - [2][0][6][35] = 46, - [2][0][9][35] = 46, - [2][0][8][35] = 54, - [2][0][11][35] = 46, - [2][0][2][37] = 74, - [2][0][1][37] = 127, - [2][0][3][37] = 70, - [2][0][5][37] = 74, - [2][0][6][37] = 46, - [2][0][9][37] = 74, - [2][0][8][37] = 54, - [2][0][11][37] = 127, - [2][0][2][38] = 76, - [2][0][1][38] = 28, - [2][0][3][38] = 127, - [2][0][5][38] = 76, - [2][0][6][38] = 28, - [2][0][9][38] = 76, - [2][0][8][38] = 54, - [2][0][11][38] = 28, - [2][0][2][40] = 76, - [2][0][1][40] = 28, - [2][0][3][40] = 127, - [2][0][5][40] = 76, - [2][0][6][40] = 28, - [2][0][9][40] = 76, - [2][0][8][40] = 54, - [2][0][11][40] = 28, - [2][0][2][42] = 76, - [2][0][1][42] = 28, - [2][0][3][42] = 127, - [2][0][5][42] = 76, - [2][0][6][42] = 28, - [2][0][9][42] = 76, - [2][0][8][42] = 54, - [2][0][11][42] = 28, - [2][0][2][44] = 76, - [2][0][1][44] = 28, - [2][0][3][44] = 127, - [2][0][5][44] = 76, - [2][0][6][44] = 28, - [2][0][9][44] = 76, - [2][0][8][44] = 54, - [2][0][11][44] = 28, - [2][0][2][46] = 76, - [2][0][1][46] = 28, - [2][0][3][46] = 127, - [2][0][5][46] = 76, - [2][0][6][46] = 28, - [2][0][9][46] = 76, - [2][0][8][46] = 54, - [2][0][11][46] = 28, - [2][1][2][0] = 58, - [2][1][1][0] = 32, - [2][1][3][0] = 38, - [2][1][5][0] = 30, - [2][1][6][0] = 32, - [2][1][9][0] = 32, - 
[2][1][8][0] = 18, - [2][1][11][0] = 32, - [2][1][2][2] = 58, - [2][1][1][2] = 32, - [2][1][3][2] = 38, - [2][1][5][2] = 30, - [2][1][6][2] = 32, - [2][1][9][2] = 32, - [2][1][8][2] = 18, - [2][1][11][2] = 32, - [2][1][2][4] = 58, - [2][1][1][4] = 32, - [2][1][3][4] = 38, - [2][1][5][4] = 30, - [2][1][6][4] = 32, - [2][1][9][4] = 32, - [2][1][8][4] = 18, - [2][1][11][4] = 32, - [2][1][2][6] = 58, - [2][1][1][6] = 32, - [2][1][3][6] = 38, - [2][1][5][6] = 30, - [2][1][6][6] = 32, - [2][1][9][6] = 32, - [2][1][8][6] = 18, - [2][1][11][6] = 32, - [2][1][2][8] = 58, - [2][1][1][8] = 32, - [2][1][3][8] = 38, - [2][1][5][8] = 52, - [2][1][6][8] = 32, - [2][1][9][8] = 32, - [2][1][8][8] = 42, - [2][1][11][8] = 32, - [2][1][2][10] = 58, - [2][1][1][10] = 32, - [2][1][3][10] = 38, - [2][1][5][10] = 52, - [2][1][6][10] = 32, - [2][1][9][10] = 32, - [2][1][8][10] = 42, - [2][1][11][10] = 32, - [2][1][2][12] = 58, - [2][1][1][12] = 32, - [2][1][3][12] = 38, - [2][1][5][12] = 52, - [2][1][6][12] = 32, - [2][1][9][12] = 32, - [2][1][8][12] = 42, - [2][1][11][12] = 32, - [2][1][2][14] = 58, - [2][1][1][14] = 32, - [2][1][3][14] = 38, - [2][1][5][14] = 52, - [2][1][6][14] = 32, - [2][1][9][14] = 32, - [2][1][8][14] = 42, - [2][1][11][14] = 32, - [2][1][2][15] = 58, - [2][1][1][15] = 32, - [2][1][3][15] = 58, - [2][1][5][15] = 58, - [2][1][6][15] = 32, - [2][1][9][15] = 32, - [2][1][8][15] = 42, - [2][1][11][15] = 32, - [2][1][2][17] = 58, - [2][1][1][17] = 32, - [2][1][3][17] = 58, - [2][1][5][17] = 58, - [2][1][6][17] = 32, - [2][1][9][17] = 32, - [2][1][8][17] = 42, - [2][1][11][17] = 32, - [2][1][2][19] = 58, - [2][1][1][19] = 32, - [2][1][3][19] = 58, - [2][1][5][19] = 58, - [2][1][6][19] = 32, - [2][1][9][19] = 32, - [2][1][8][19] = 42, - [2][1][11][19] = 32, - [2][1][2][21] = 58, - [2][1][1][21] = 32, - [2][1][3][21] = 58, - [2][1][5][21] = 58, - [2][1][6][21] = 32, - [2][1][9][21] = 32, - [2][1][8][21] = 42, - [2][1][11][21] = 32, - [2][1][2][23] = 58, - [2][1][1][23] = 32, - [2][1][3][23] = 58, - [2][1][5][23] = 58, - [2][1][6][23] = 32, - [2][1][9][23] = 32, - [2][1][8][23] = 42, - [2][1][11][23] = 32, - [2][1][2][25] = 58, - [2][1][1][25] = 32, - [2][1][3][25] = 58, - [2][1][5][25] = 127, - [2][1][6][25] = 32, - [2][1][9][25] = 127, - [2][1][8][25] = 42, - [2][1][11][25] = 32, - [2][1][2][27] = 58, - [2][1][1][27] = 32, - [2][1][3][27] = 58, - [2][1][5][27] = 127, - [2][1][6][27] = 32, - [2][1][9][27] = 127, - [2][1][8][27] = 42, - [2][1][11][27] = 32, - [2][1][2][29] = 58, - [2][1][1][29] = 32, - [2][1][3][29] = 58, - [2][1][5][29] = 127, - [2][1][6][29] = 32, - [2][1][9][29] = 127, - [2][1][8][29] = 42, - [2][1][11][29] = 32, - [2][1][2][31] = 58, - [2][1][1][31] = 32, - [2][1][3][31] = 58, - [2][1][5][31] = 58, - [2][1][6][31] = 32, - [2][1][9][31] = 32, - [2][1][8][31] = 42, - [2][1][11][31] = 32, - [2][1][2][33] = 58, - [2][1][1][33] = 32, - [2][1][3][33] = 58, - [2][1][5][33] = 58, - [2][1][6][33] = 32, - [2][1][9][33] = 32, - [2][1][8][33] = 42, - [2][1][11][33] = 32, - [2][1][2][35] = 58, - [2][1][1][35] = 32, - [2][1][3][35] = 58, - [2][1][5][35] = 58, - [2][1][6][35] = 32, - [2][1][9][35] = 32, - [2][1][8][35] = 42, - [2][1][11][35] = 32, - [2][1][2][37] = 58, - [2][1][1][37] = 127, - [2][1][3][37] = 58, - [2][1][5][37] = 58, - [2][1][6][37] = 32, - [2][1][9][37] = 62, - [2][1][8][37] = 42, - [2][1][11][37] = 127, - [2][1][2][38] = 76, - [2][1][1][38] = 16, - [2][1][3][38] = 127, - [2][1][5][38] = 76, - [2][1][6][38] = 16, - [2][1][9][38] = 76, - [2][1][8][38] = 42, - [2][1][11][38] = 
16, - [2][1][2][40] = 76, - [2][1][1][40] = 16, - [2][1][3][40] = 127, - [2][1][5][40] = 76, - [2][1][6][40] = 16, - [2][1][9][40] = 76, - [2][1][8][40] = 42, - [2][1][11][40] = 16, - [2][1][2][42] = 76, - [2][1][1][42] = 16, - [2][1][3][42] = 127, - [2][1][5][42] = 76, - [2][1][6][42] = 16, - [2][1][9][42] = 76, - [2][1][8][42] = 42, - [2][1][11][42] = 16, - [2][1][2][44] = 76, - [2][1][1][44] = 16, - [2][1][3][44] = 127, - [2][1][5][44] = 76, - [2][1][6][44] = 16, - [2][1][9][44] = 76, - [2][1][8][44] = 42, - [2][1][11][44] = 16, - [2][1][2][46] = 76, - [2][1][1][46] = 16, - [2][1][3][46] = 127, - [2][1][5][46] = 76, - [2][1][6][46] = 16, - [2][1][9][46] = 76, - [2][1][8][46] = 42, - [2][1][11][46] = 16, + [0][0][RTW89_WW][0] = 22, + [0][0][RTW89_WW][2] = 22, + [0][0][RTW89_WW][4] = 22, + [0][0][RTW89_WW][6] = 20, + [0][0][RTW89_WW][8] = 24, + [0][0][RTW89_WW][10] = 24, + [0][0][RTW89_WW][12] = 24, + [0][0][RTW89_WW][14] = 24, + [0][0][RTW89_WW][15] = 24, + [0][0][RTW89_WW][17] = 24, + [0][0][RTW89_WW][19] = 24, + [0][0][RTW89_WW][21] = 24, + [0][0][RTW89_WW][23] = 24, + [0][0][RTW89_WW][25] = 24, + [0][0][RTW89_WW][27] = 24, + [0][0][RTW89_WW][29] = 24, + [0][0][RTW89_WW][31] = 24, + [0][0][RTW89_WW][33] = 24, + [0][0][RTW89_WW][35] = 24, + [0][0][RTW89_WW][37] = 44, + [0][0][RTW89_WW][38] = 28, + [0][0][RTW89_WW][40] = 28, + [0][0][RTW89_WW][42] = 28, + [0][0][RTW89_WW][44] = 28, + [0][0][RTW89_WW][46] = 28, + [0][1][RTW89_WW][0] = 8, + [0][1][RTW89_WW][2] = 8, + [0][1][RTW89_WW][4] = 8, + [0][1][RTW89_WW][6] = 4, + [0][1][RTW89_WW][8] = 12, + [0][1][RTW89_WW][10] = 12, + [0][1][RTW89_WW][12] = 12, + [0][1][RTW89_WW][14] = 12, + [0][1][RTW89_WW][15] = 12, + [0][1][RTW89_WW][17] = 12, + [0][1][RTW89_WW][19] = 12, + [0][1][RTW89_WW][21] = 12, + [0][1][RTW89_WW][23] = 12, + [0][1][RTW89_WW][25] = 12, + [0][1][RTW89_WW][27] = 12, + [0][1][RTW89_WW][29] = 12, + [0][1][RTW89_WW][31] = 12, + [0][1][RTW89_WW][33] = 12, + [0][1][RTW89_WW][35] = 12, + [0][1][RTW89_WW][37] = 32, + [0][1][RTW89_WW][38] = 16, + [0][1][RTW89_WW][40] = 16, + [0][1][RTW89_WW][42] = 16, + [0][1][RTW89_WW][44] = 16, + [0][1][RTW89_WW][46] = 16, + [1][0][RTW89_WW][0] = 30, + [1][0][RTW89_WW][2] = 30, + [1][0][RTW89_WW][4] = 30, + [1][0][RTW89_WW][6] = 30, + [1][0][RTW89_WW][8] = 36, + [1][0][RTW89_WW][10] = 36, + [1][0][RTW89_WW][12] = 36, + [1][0][RTW89_WW][14] = 36, + [1][0][RTW89_WW][15] = 36, + [1][0][RTW89_WW][17] = 36, + [1][0][RTW89_WW][19] = 36, + [1][0][RTW89_WW][21] = 36, + [1][0][RTW89_WW][23] = 36, + [1][0][RTW89_WW][25] = 36, + [1][0][RTW89_WW][27] = 36, + [1][0][RTW89_WW][29] = 36, + [1][0][RTW89_WW][31] = 36, + [1][0][RTW89_WW][33] = 36, + [1][0][RTW89_WW][35] = 36, + [1][0][RTW89_WW][37] = 54, + [1][0][RTW89_WW][38] = 28, + [1][0][RTW89_WW][40] = 28, + [1][0][RTW89_WW][42] = 28, + [1][0][RTW89_WW][44] = 28, + [1][0][RTW89_WW][46] = 28, + [1][1][RTW89_WW][0] = 18, + [1][1][RTW89_WW][2] = 18, + [1][1][RTW89_WW][4] = 18, + [1][1][RTW89_WW][6] = 16, + [1][1][RTW89_WW][8] = 22, + [1][1][RTW89_WW][10] = 22, + [1][1][RTW89_WW][12] = 22, + [1][1][RTW89_WW][14] = 22, + [1][1][RTW89_WW][15] = 22, + [1][1][RTW89_WW][17] = 22, + [1][1][RTW89_WW][19] = 22, + [1][1][RTW89_WW][21] = 22, + [1][1][RTW89_WW][23] = 22, + [1][1][RTW89_WW][25] = 22, + [1][1][RTW89_WW][27] = 22, + [1][1][RTW89_WW][29] = 22, + [1][1][RTW89_WW][31] = 22, + [1][1][RTW89_WW][33] = 22, + [1][1][RTW89_WW][35] = 22, + [1][1][RTW89_WW][37] = 42, + [1][1][RTW89_WW][38] = 16, + [1][1][RTW89_WW][40] = 16, + [1][1][RTW89_WW][42] = 16, + 
[1][1][RTW89_WW][44] = 16, + [1][1][RTW89_WW][46] = 16, + [2][0][RTW89_WW][0] = 30, + [2][0][RTW89_WW][2] = 30, + [2][0][RTW89_WW][4] = 30, + [2][0][RTW89_WW][6] = 30, + [2][0][RTW89_WW][8] = 46, + [2][0][RTW89_WW][10] = 46, + [2][0][RTW89_WW][12] = 46, + [2][0][RTW89_WW][14] = 46, + [2][0][RTW89_WW][15] = 46, + [2][0][RTW89_WW][17] = 46, + [2][0][RTW89_WW][19] = 46, + [2][0][RTW89_WW][21] = 46, + [2][0][RTW89_WW][23] = 46, + [2][0][RTW89_WW][25] = 46, + [2][0][RTW89_WW][27] = 46, + [2][0][RTW89_WW][29] = 46, + [2][0][RTW89_WW][31] = 46, + [2][0][RTW89_WW][33] = 46, + [2][0][RTW89_WW][35] = 46, + [2][0][RTW89_WW][37] = 54, + [2][0][RTW89_WW][38] = 28, + [2][0][RTW89_WW][40] = 28, + [2][0][RTW89_WW][42] = 28, + [2][0][RTW89_WW][44] = 28, + [2][0][RTW89_WW][46] = 28, + [2][1][RTW89_WW][0] = 18, + [2][1][RTW89_WW][2] = 18, + [2][1][RTW89_WW][4] = 18, + [2][1][RTW89_WW][6] = 18, + [2][1][RTW89_WW][8] = 32, + [2][1][RTW89_WW][10] = 32, + [2][1][RTW89_WW][12] = 32, + [2][1][RTW89_WW][14] = 32, + [2][1][RTW89_WW][15] = 32, + [2][1][RTW89_WW][17] = 32, + [2][1][RTW89_WW][19] = 32, + [2][1][RTW89_WW][21] = 32, + [2][1][RTW89_WW][23] = 32, + [2][1][RTW89_WW][25] = 32, + [2][1][RTW89_WW][27] = 32, + [2][1][RTW89_WW][29] = 32, + [2][1][RTW89_WW][31] = 32, + [2][1][RTW89_WW][33] = 32, + [2][1][RTW89_WW][35] = 32, + [2][1][RTW89_WW][37] = 42, + [2][1][RTW89_WW][38] = 16, + [2][1][RTW89_WW][40] = 16, + [2][1][RTW89_WW][42] = 16, + [2][1][RTW89_WW][44] = 16, + [2][1][RTW89_WW][46] = 16, + [0][0][RTW89_FCC][0] = 48, + [0][0][RTW89_ETSI][0] = 24, + [0][0][RTW89_MKK][0] = 26, + [0][0][RTW89_IC][0] = 22, + [0][0][RTW89_KCC][0] = 46, + [0][0][RTW89_ACMA][0] = 24, + [0][0][RTW89_CHILE][0] = 30, + [0][0][RTW89_UKRAINE][0] = 24, + [0][0][RTW89_MEXICO][0] = 48, + [0][0][RTW89_CN][0] = 24, + [0][0][RTW89_QATAR][0] = 24, + [0][0][RTW89_FCC][2] = 48, + [0][0][RTW89_ETSI][2] = 24, + [0][0][RTW89_MKK][2] = 26, + [0][0][RTW89_IC][2] = 22, + [0][0][RTW89_KCC][2] = 46, + [0][0][RTW89_ACMA][2] = 24, + [0][0][RTW89_CHILE][2] = 30, + [0][0][RTW89_UKRAINE][2] = 24, + [0][0][RTW89_MEXICO][2] = 48, + [0][0][RTW89_CN][2] = 24, + [0][0][RTW89_QATAR][2] = 24, + [0][0][RTW89_FCC][4] = 48, + [0][0][RTW89_ETSI][4] = 24, + [0][0][RTW89_MKK][4] = 26, + [0][0][RTW89_IC][4] = 22, + [0][0][RTW89_KCC][4] = 46, + [0][0][RTW89_ACMA][4] = 24, + [0][0][RTW89_CHILE][4] = 30, + [0][0][RTW89_UKRAINE][4] = 24, + [0][0][RTW89_MEXICO][4] = 48, + [0][0][RTW89_CN][4] = 24, + [0][0][RTW89_QATAR][4] = 24, + [0][0][RTW89_FCC][6] = 48, + [0][0][RTW89_ETSI][6] = 24, + [0][0][RTW89_MKK][6] = 26, + [0][0][RTW89_IC][6] = 22, + [0][0][RTW89_KCC][6] = 20, + [0][0][RTW89_ACMA][6] = 24, + [0][0][RTW89_CHILE][6] = 30, + [0][0][RTW89_UKRAINE][6] = 24, + [0][0][RTW89_MEXICO][6] = 48, + [0][0][RTW89_CN][6] = 24, + [0][0][RTW89_QATAR][6] = 24, + [0][0][RTW89_FCC][8] = 48, + [0][0][RTW89_ETSI][8] = 24, + [0][0][RTW89_MKK][8] = 26, + [0][0][RTW89_IC][8] = 48, + [0][0][RTW89_KCC][8] = 46, + [0][0][RTW89_ACMA][8] = 24, + [0][0][RTW89_CHILE][8] = 48, + [0][0][RTW89_UKRAINE][8] = 24, + [0][0][RTW89_MEXICO][8] = 48, + [0][0][RTW89_CN][8] = 24, + [0][0][RTW89_QATAR][8] = 24, + [0][0][RTW89_FCC][10] = 48, + [0][0][RTW89_ETSI][10] = 24, + [0][0][RTW89_MKK][10] = 26, + [0][0][RTW89_IC][10] = 48, + [0][0][RTW89_KCC][10] = 46, + [0][0][RTW89_ACMA][10] = 24, + [0][0][RTW89_CHILE][10] = 48, + [0][0][RTW89_UKRAINE][10] = 24, + [0][0][RTW89_MEXICO][10] = 48, + [0][0][RTW89_CN][10] = 24, + [0][0][RTW89_QATAR][10] = 24, + [0][0][RTW89_FCC][12] = 48, + [0][0][RTW89_ETSI][12] = 24, + 
[0][0][RTW89_MKK][12] = 26, + [0][0][RTW89_IC][12] = 48, + [0][0][RTW89_KCC][12] = 46, + [0][0][RTW89_ACMA][12] = 24, + [0][0][RTW89_CHILE][12] = 48, + [0][0][RTW89_UKRAINE][12] = 24, + [0][0][RTW89_MEXICO][12] = 48, + [0][0][RTW89_CN][12] = 24, + [0][0][RTW89_QATAR][12] = 24, + [0][0][RTW89_FCC][14] = 48, + [0][0][RTW89_ETSI][14] = 24, + [0][0][RTW89_MKK][14] = 26, + [0][0][RTW89_IC][14] = 48, + [0][0][RTW89_KCC][14] = 46, + [0][0][RTW89_ACMA][14] = 24, + [0][0][RTW89_CHILE][14] = 48, + [0][0][RTW89_UKRAINE][14] = 24, + [0][0][RTW89_MEXICO][14] = 48, + [0][0][RTW89_CN][14] = 24, + [0][0][RTW89_QATAR][14] = 24, + [0][0][RTW89_FCC][15] = 48, + [0][0][RTW89_ETSI][15] = 24, + [0][0][RTW89_MKK][15] = 44, + [0][0][RTW89_IC][15] = 48, + [0][0][RTW89_KCC][15] = 46, + [0][0][RTW89_ACMA][15] = 24, + [0][0][RTW89_CHILE][15] = 48, + [0][0][RTW89_UKRAINE][15] = 24, + [0][0][RTW89_MEXICO][15] = 48, + [0][0][RTW89_CN][15] = 127, + [0][0][RTW89_QATAR][15] = 24, + [0][0][RTW89_FCC][17] = 48, + [0][0][RTW89_ETSI][17] = 24, + [0][0][RTW89_MKK][17] = 44, + [0][0][RTW89_IC][17] = 48, + [0][0][RTW89_KCC][17] = 46, + [0][0][RTW89_ACMA][17] = 24, + [0][0][RTW89_CHILE][17] = 48, + [0][0][RTW89_UKRAINE][17] = 24, + [0][0][RTW89_MEXICO][17] = 48, + [0][0][RTW89_CN][17] = 127, + [0][0][RTW89_QATAR][17] = 24, + [0][0][RTW89_FCC][19] = 48, + [0][0][RTW89_ETSI][19] = 24, + [0][0][RTW89_MKK][19] = 44, + [0][0][RTW89_IC][19] = 48, + [0][0][RTW89_KCC][19] = 46, + [0][0][RTW89_ACMA][19] = 24, + [0][0][RTW89_CHILE][19] = 48, + [0][0][RTW89_UKRAINE][19] = 24, + [0][0][RTW89_MEXICO][19] = 48, + [0][0][RTW89_CN][19] = 127, + [0][0][RTW89_QATAR][19] = 24, + [0][0][RTW89_FCC][21] = 48, + [0][0][RTW89_ETSI][21] = 24, + [0][0][RTW89_MKK][21] = 44, + [0][0][RTW89_IC][21] = 48, + [0][0][RTW89_KCC][21] = 46, + [0][0][RTW89_ACMA][21] = 24, + [0][0][RTW89_CHILE][21] = 48, + [0][0][RTW89_UKRAINE][21] = 24, + [0][0][RTW89_MEXICO][21] = 48, + [0][0][RTW89_CN][21] = 127, + [0][0][RTW89_QATAR][21] = 24, + [0][0][RTW89_FCC][23] = 48, + [0][0][RTW89_ETSI][23] = 24, + [0][0][RTW89_MKK][23] = 44, + [0][0][RTW89_IC][23] = 48, + [0][0][RTW89_KCC][23] = 46, + [0][0][RTW89_ACMA][23] = 24, + [0][0][RTW89_CHILE][23] = 48, + [0][0][RTW89_UKRAINE][23] = 24, + [0][0][RTW89_MEXICO][23] = 48, + [0][0][RTW89_CN][23] = 127, + [0][0][RTW89_QATAR][23] = 24, + [0][0][RTW89_FCC][25] = 48, + [0][0][RTW89_ETSI][25] = 24, + [0][0][RTW89_MKK][25] = 44, + [0][0][RTW89_IC][25] = 127, + [0][0][RTW89_KCC][25] = 46, + [0][0][RTW89_ACMA][25] = 127, + [0][0][RTW89_CHILE][25] = 48, + [0][0][RTW89_UKRAINE][25] = 24, + [0][0][RTW89_MEXICO][25] = 48, + [0][0][RTW89_CN][25] = 127, + [0][0][RTW89_QATAR][25] = 24, + [0][0][RTW89_FCC][27] = 48, + [0][0][RTW89_ETSI][27] = 24, + [0][0][RTW89_MKK][27] = 44, + [0][0][RTW89_IC][27] = 127, + [0][0][RTW89_KCC][27] = 46, + [0][0][RTW89_ACMA][27] = 127, + [0][0][RTW89_CHILE][27] = 48, + [0][0][RTW89_UKRAINE][27] = 24, + [0][0][RTW89_MEXICO][27] = 48, + [0][0][RTW89_CN][27] = 127, + [0][0][RTW89_QATAR][27] = 24, + [0][0][RTW89_FCC][29] = 48, + [0][0][RTW89_ETSI][29] = 24, + [0][0][RTW89_MKK][29] = 44, + [0][0][RTW89_IC][29] = 127, + [0][0][RTW89_KCC][29] = 46, + [0][0][RTW89_ACMA][29] = 127, + [0][0][RTW89_CHILE][29] = 48, + [0][0][RTW89_UKRAINE][29] = 24, + [0][0][RTW89_MEXICO][29] = 48, + [0][0][RTW89_CN][29] = 127, + [0][0][RTW89_QATAR][29] = 24, + [0][0][RTW89_FCC][31] = 48, + [0][0][RTW89_ETSI][31] = 24, + [0][0][RTW89_MKK][31] = 44, + [0][0][RTW89_IC][31] = 48, + [0][0][RTW89_KCC][31] = 46, + [0][0][RTW89_ACMA][31] = 24, + 
[0][0][RTW89_CHILE][31] = 48, + [0][0][RTW89_UKRAINE][31] = 24, + [0][0][RTW89_MEXICO][31] = 48, + [0][0][RTW89_CN][31] = 127, + [0][0][RTW89_QATAR][31] = 24, + [0][0][RTW89_FCC][33] = 48, + [0][0][RTW89_ETSI][33] = 24, + [0][0][RTW89_MKK][33] = 44, + [0][0][RTW89_IC][33] = 48, + [0][0][RTW89_KCC][33] = 46, + [0][0][RTW89_ACMA][33] = 24, + [0][0][RTW89_CHILE][33] = 48, + [0][0][RTW89_UKRAINE][33] = 24, + [0][0][RTW89_MEXICO][33] = 48, + [0][0][RTW89_CN][33] = 127, + [0][0][RTW89_QATAR][33] = 24, + [0][0][RTW89_FCC][35] = 48, + [0][0][RTW89_ETSI][35] = 24, + [0][0][RTW89_MKK][35] = 44, + [0][0][RTW89_IC][35] = 48, + [0][0][RTW89_KCC][35] = 46, + [0][0][RTW89_ACMA][35] = 24, + [0][0][RTW89_CHILE][35] = 48, + [0][0][RTW89_UKRAINE][35] = 24, + [0][0][RTW89_MEXICO][35] = 48, + [0][0][RTW89_CN][35] = 127, + [0][0][RTW89_QATAR][35] = 24, + [0][0][RTW89_FCC][37] = 48, + [0][0][RTW89_ETSI][37] = 127, + [0][0][RTW89_MKK][37] = 44, + [0][0][RTW89_IC][37] = 48, + [0][0][RTW89_KCC][37] = 46, + [0][0][RTW89_ACMA][37] = 48, + [0][0][RTW89_CHILE][37] = 48, + [0][0][RTW89_UKRAINE][37] = 127, + [0][0][RTW89_MEXICO][37] = 48, + [0][0][RTW89_CN][37] = 127, + [0][0][RTW89_QATAR][37] = 127, + [0][0][RTW89_FCC][38] = 76, + [0][0][RTW89_ETSI][38] = 28, + [0][0][RTW89_MKK][38] = 127, + [0][0][RTW89_IC][38] = 76, + [0][0][RTW89_KCC][38] = 46, + [0][0][RTW89_ACMA][38] = 76, + [0][0][RTW89_CHILE][38] = 54, + [0][0][RTW89_UKRAINE][38] = 28, + [0][0][RTW89_MEXICO][38] = 76, + [0][0][RTW89_CN][38] = 62, + [0][0][RTW89_QATAR][38] = 28, + [0][0][RTW89_FCC][40] = 76, + [0][0][RTW89_ETSI][40] = 28, + [0][0][RTW89_MKK][40] = 127, + [0][0][RTW89_IC][40] = 76, + [0][0][RTW89_KCC][40] = 46, + [0][0][RTW89_ACMA][40] = 76, + [0][0][RTW89_CHILE][40] = 54, + [0][0][RTW89_UKRAINE][40] = 28, + [0][0][RTW89_MEXICO][40] = 76, + [0][0][RTW89_CN][40] = 62, + [0][0][RTW89_QATAR][40] = 28, + [0][0][RTW89_FCC][42] = 76, + [0][0][RTW89_ETSI][42] = 28, + [0][0][RTW89_MKK][42] = 127, + [0][0][RTW89_IC][42] = 76, + [0][0][RTW89_KCC][42] = 46, + [0][0][RTW89_ACMA][42] = 76, + [0][0][RTW89_CHILE][42] = 54, + [0][0][RTW89_UKRAINE][42] = 28, + [0][0][RTW89_MEXICO][42] = 76, + [0][0][RTW89_CN][42] = 62, + [0][0][RTW89_QATAR][42] = 28, + [0][0][RTW89_FCC][44] = 76, + [0][0][RTW89_ETSI][44] = 28, + [0][0][RTW89_MKK][44] = 127, + [0][0][RTW89_IC][44] = 76, + [0][0][RTW89_KCC][44] = 46, + [0][0][RTW89_ACMA][44] = 76, + [0][0][RTW89_CHILE][44] = 54, + [0][0][RTW89_UKRAINE][44] = 28, + [0][0][RTW89_MEXICO][44] = 76, + [0][0][RTW89_CN][44] = 62, + [0][0][RTW89_QATAR][44] = 28, + [0][0][RTW89_FCC][46] = 76, + [0][0][RTW89_ETSI][46] = 28, + [0][0][RTW89_MKK][46] = 127, + [0][0][RTW89_IC][46] = 76, + [0][0][RTW89_KCC][46] = 46, + [0][0][RTW89_ACMA][46] = 76, + [0][0][RTW89_CHILE][46] = 54, + [0][0][RTW89_UKRAINE][46] = 28, + [0][0][RTW89_MEXICO][46] = 76, + [0][0][RTW89_CN][46] = 62, + [0][0][RTW89_QATAR][46] = 28, + [0][1][RTW89_FCC][0] = 36, + [0][1][RTW89_ETSI][0] = 12, + [0][1][RTW89_MKK][0] = 14, + [0][1][RTW89_IC][0] = 8, + [0][1][RTW89_KCC][0] = 32, + [0][1][RTW89_ACMA][0] = 12, + [0][1][RTW89_CHILE][0] = 18, + [0][1][RTW89_UKRAINE][0] = 12, + [0][1][RTW89_MEXICO][0] = 36, + [0][1][RTW89_CN][0] = 12, + [0][1][RTW89_QATAR][0] = 12, + [0][1][RTW89_FCC][2] = 36, + [0][1][RTW89_ETSI][2] = 12, + [0][1][RTW89_MKK][2] = 14, + [0][1][RTW89_IC][2] = 8, + [0][1][RTW89_KCC][2] = 32, + [0][1][RTW89_ACMA][2] = 12, + [0][1][RTW89_CHILE][2] = 18, + [0][1][RTW89_UKRAINE][2] = 12, + [0][1][RTW89_MEXICO][2] = 36, + [0][1][RTW89_CN][2] = 12, + [0][1][RTW89_QATAR][2] = 
12, + [0][1][RTW89_FCC][4] = 36, + [0][1][RTW89_ETSI][4] = 12, + [0][1][RTW89_MKK][4] = 14, + [0][1][RTW89_IC][4] = 8, + [0][1][RTW89_KCC][4] = 32, + [0][1][RTW89_ACMA][4] = 12, + [0][1][RTW89_CHILE][4] = 18, + [0][1][RTW89_UKRAINE][4] = 12, + [0][1][RTW89_MEXICO][4] = 36, + [0][1][RTW89_CN][4] = 12, + [0][1][RTW89_QATAR][4] = 12, + [0][1][RTW89_FCC][6] = 36, + [0][1][RTW89_ETSI][6] = 12, + [0][1][RTW89_MKK][6] = 14, + [0][1][RTW89_IC][6] = 8, + [0][1][RTW89_KCC][6] = 4, + [0][1][RTW89_ACMA][6] = 12, + [0][1][RTW89_CHILE][6] = 18, + [0][1][RTW89_UKRAINE][6] = 12, + [0][1][RTW89_MEXICO][6] = 36, + [0][1][RTW89_CN][6] = 12, + [0][1][RTW89_QATAR][6] = 12, + [0][1][RTW89_FCC][8] = 36, + [0][1][RTW89_ETSI][8] = 12, + [0][1][RTW89_MKK][8] = 14, + [0][1][RTW89_IC][8] = 36, + [0][1][RTW89_KCC][8] = 32, + [0][1][RTW89_ACMA][8] = 12, + [0][1][RTW89_CHILE][8] = 36, + [0][1][RTW89_UKRAINE][8] = 12, + [0][1][RTW89_MEXICO][8] = 36, + [0][1][RTW89_CN][8] = 12, + [0][1][RTW89_QATAR][8] = 12, + [0][1][RTW89_FCC][10] = 36, + [0][1][RTW89_ETSI][10] = 12, + [0][1][RTW89_MKK][10] = 14, + [0][1][RTW89_IC][10] = 36, + [0][1][RTW89_KCC][10] = 32, + [0][1][RTW89_ACMA][10] = 12, + [0][1][RTW89_CHILE][10] = 36, + [0][1][RTW89_UKRAINE][10] = 12, + [0][1][RTW89_MEXICO][10] = 36, + [0][1][RTW89_CN][10] = 12, + [0][1][RTW89_QATAR][10] = 12, + [0][1][RTW89_FCC][12] = 36, + [0][1][RTW89_ETSI][12] = 12, + [0][1][RTW89_MKK][12] = 14, + [0][1][RTW89_IC][12] = 36, + [0][1][RTW89_KCC][12] = 32, + [0][1][RTW89_ACMA][12] = 12, + [0][1][RTW89_CHILE][12] = 36, + [0][1][RTW89_UKRAINE][12] = 12, + [0][1][RTW89_MEXICO][12] = 36, + [0][1][RTW89_CN][12] = 12, + [0][1][RTW89_QATAR][12] = 12, + [0][1][RTW89_FCC][14] = 36, + [0][1][RTW89_ETSI][14] = 12, + [0][1][RTW89_MKK][14] = 14, + [0][1][RTW89_IC][14] = 36, + [0][1][RTW89_KCC][14] = 32, + [0][1][RTW89_ACMA][14] = 12, + [0][1][RTW89_CHILE][14] = 36, + [0][1][RTW89_UKRAINE][14] = 12, + [0][1][RTW89_MEXICO][14] = 36, + [0][1][RTW89_CN][14] = 12, + [0][1][RTW89_QATAR][14] = 12, + [0][1][RTW89_FCC][15] = 36, + [0][1][RTW89_ETSI][15] = 12, + [0][1][RTW89_MKK][15] = 32, + [0][1][RTW89_IC][15] = 36, + [0][1][RTW89_KCC][15] = 32, + [0][1][RTW89_ACMA][15] = 12, + [0][1][RTW89_CHILE][15] = 36, + [0][1][RTW89_UKRAINE][15] = 12, + [0][1][RTW89_MEXICO][15] = 36, + [0][1][RTW89_CN][15] = 127, + [0][1][RTW89_QATAR][15] = 12, + [0][1][RTW89_FCC][17] = 36, + [0][1][RTW89_ETSI][17] = 12, + [0][1][RTW89_MKK][17] = 32, + [0][1][RTW89_IC][17] = 36, + [0][1][RTW89_KCC][17] = 32, + [0][1][RTW89_ACMA][17] = 12, + [0][1][RTW89_CHILE][17] = 36, + [0][1][RTW89_UKRAINE][17] = 12, + [0][1][RTW89_MEXICO][17] = 36, + [0][1][RTW89_CN][17] = 127, + [0][1][RTW89_QATAR][17] = 12, + [0][1][RTW89_FCC][19] = 36, + [0][1][RTW89_ETSI][19] = 12, + [0][1][RTW89_MKK][19] = 32, + [0][1][RTW89_IC][19] = 36, + [0][1][RTW89_KCC][19] = 32, + [0][1][RTW89_ACMA][19] = 12, + [0][1][RTW89_CHILE][19] = 36, + [0][1][RTW89_UKRAINE][19] = 12, + [0][1][RTW89_MEXICO][19] = 36, + [0][1][RTW89_CN][19] = 127, + [0][1][RTW89_QATAR][19] = 12, + [0][1][RTW89_FCC][21] = 36, + [0][1][RTW89_ETSI][21] = 12, + [0][1][RTW89_MKK][21] = 32, + [0][1][RTW89_IC][21] = 36, + [0][1][RTW89_KCC][21] = 32, + [0][1][RTW89_ACMA][21] = 12, + [0][1][RTW89_CHILE][21] = 36, + [0][1][RTW89_UKRAINE][21] = 12, + [0][1][RTW89_MEXICO][21] = 36, + [0][1][RTW89_CN][21] = 127, + [0][1][RTW89_QATAR][21] = 12, + [0][1][RTW89_FCC][23] = 36, + [0][1][RTW89_ETSI][23] = 12, + [0][1][RTW89_MKK][23] = 32, + [0][1][RTW89_IC][23] = 36, + [0][1][RTW89_KCC][23] = 32, + 
[0][1][RTW89_ACMA][23] = 12, + [0][1][RTW89_CHILE][23] = 36, + [0][1][RTW89_UKRAINE][23] = 12, + [0][1][RTW89_MEXICO][23] = 36, + [0][1][RTW89_CN][23] = 127, + [0][1][RTW89_QATAR][23] = 12, + [0][1][RTW89_FCC][25] = 36, + [0][1][RTW89_ETSI][25] = 12, + [0][1][RTW89_MKK][25] = 32, + [0][1][RTW89_IC][25] = 127, + [0][1][RTW89_KCC][25] = 32, + [0][1][RTW89_ACMA][25] = 127, + [0][1][RTW89_CHILE][25] = 36, + [0][1][RTW89_UKRAINE][25] = 12, + [0][1][RTW89_MEXICO][25] = 36, + [0][1][RTW89_CN][25] = 127, + [0][1][RTW89_QATAR][25] = 12, + [0][1][RTW89_FCC][27] = 36, + [0][1][RTW89_ETSI][27] = 12, + [0][1][RTW89_MKK][27] = 32, + [0][1][RTW89_IC][27] = 127, + [0][1][RTW89_KCC][27] = 32, + [0][1][RTW89_ACMA][27] = 127, + [0][1][RTW89_CHILE][27] = 36, + [0][1][RTW89_UKRAINE][27] = 12, + [0][1][RTW89_MEXICO][27] = 36, + [0][1][RTW89_CN][27] = 127, + [0][1][RTW89_QATAR][27] = 12, + [0][1][RTW89_FCC][29] = 36, + [0][1][RTW89_ETSI][29] = 12, + [0][1][RTW89_MKK][29] = 32, + [0][1][RTW89_IC][29] = 127, + [0][1][RTW89_KCC][29] = 32, + [0][1][RTW89_ACMA][29] = 127, + [0][1][RTW89_CHILE][29] = 36, + [0][1][RTW89_UKRAINE][29] = 12, + [0][1][RTW89_MEXICO][29] = 36, + [0][1][RTW89_CN][29] = 127, + [0][1][RTW89_QATAR][29] = 12, + [0][1][RTW89_FCC][31] = 36, + [0][1][RTW89_ETSI][31] = 12, + [0][1][RTW89_MKK][31] = 32, + [0][1][RTW89_IC][31] = 36, + [0][1][RTW89_KCC][31] = 32, + [0][1][RTW89_ACMA][31] = 12, + [0][1][RTW89_CHILE][31] = 36, + [0][1][RTW89_UKRAINE][31] = 12, + [0][1][RTW89_MEXICO][31] = 36, + [0][1][RTW89_CN][31] = 127, + [0][1][RTW89_QATAR][31] = 12, + [0][1][RTW89_FCC][33] = 36, + [0][1][RTW89_ETSI][33] = 12, + [0][1][RTW89_MKK][33] = 32, + [0][1][RTW89_IC][33] = 36, + [0][1][RTW89_KCC][33] = 32, + [0][1][RTW89_ACMA][33] = 12, + [0][1][RTW89_CHILE][33] = 36, + [0][1][RTW89_UKRAINE][33] = 12, + [0][1][RTW89_MEXICO][33] = 36, + [0][1][RTW89_CN][33] = 127, + [0][1][RTW89_QATAR][33] = 12, + [0][1][RTW89_FCC][35] = 36, + [0][1][RTW89_ETSI][35] = 12, + [0][1][RTW89_MKK][35] = 32, + [0][1][RTW89_IC][35] = 36, + [0][1][RTW89_KCC][35] = 32, + [0][1][RTW89_ACMA][35] = 12, + [0][1][RTW89_CHILE][35] = 36, + [0][1][RTW89_UKRAINE][35] = 12, + [0][1][RTW89_MEXICO][35] = 36, + [0][1][RTW89_CN][35] = 127, + [0][1][RTW89_QATAR][35] = 12, + [0][1][RTW89_FCC][37] = 36, + [0][1][RTW89_ETSI][37] = 127, + [0][1][RTW89_MKK][37] = 32, + [0][1][RTW89_IC][37] = 36, + [0][1][RTW89_KCC][37] = 32, + [0][1][RTW89_ACMA][37] = 36, + [0][1][RTW89_CHILE][37] = 36, + [0][1][RTW89_UKRAINE][37] = 127, + [0][1][RTW89_MEXICO][37] = 36, + [0][1][RTW89_CN][37] = 127, + [0][1][RTW89_QATAR][37] = 127, + [0][1][RTW89_FCC][38] = 72, + [0][1][RTW89_ETSI][38] = 16, + [0][1][RTW89_MKK][38] = 127, + [0][1][RTW89_IC][38] = 72, + [0][1][RTW89_KCC][38] = 32, + [0][1][RTW89_ACMA][38] = 76, + [0][1][RTW89_CHILE][38] = 42, + [0][1][RTW89_UKRAINE][38] = 16, + [0][1][RTW89_MEXICO][38] = 72, + [0][1][RTW89_CN][38] = 50, + [0][1][RTW89_QATAR][38] = 16, + [0][1][RTW89_FCC][40] = 76, + [0][1][RTW89_ETSI][40] = 16, + [0][1][RTW89_MKK][40] = 127, + [0][1][RTW89_IC][40] = 76, + [0][1][RTW89_KCC][40] = 32, + [0][1][RTW89_ACMA][40] = 76, + [0][1][RTW89_CHILE][40] = 42, + [0][1][RTW89_UKRAINE][40] = 16, + [0][1][RTW89_MEXICO][40] = 76, + [0][1][RTW89_CN][40] = 50, + [0][1][RTW89_QATAR][40] = 16, + [0][1][RTW89_FCC][42] = 76, + [0][1][RTW89_ETSI][42] = 16, + [0][1][RTW89_MKK][42] = 127, + [0][1][RTW89_IC][42] = 76, + [0][1][RTW89_KCC][42] = 32, + [0][1][RTW89_ACMA][42] = 76, + [0][1][RTW89_CHILE][42] = 42, + [0][1][RTW89_UKRAINE][42] = 16, + [0][1][RTW89_MEXICO][42] = 
76, + [0][1][RTW89_CN][42] = 50, + [0][1][RTW89_QATAR][42] = 16, + [0][1][RTW89_FCC][44] = 76, + [0][1][RTW89_ETSI][44] = 16, + [0][1][RTW89_MKK][44] = 127, + [0][1][RTW89_IC][44] = 76, + [0][1][RTW89_KCC][44] = 32, + [0][1][RTW89_ACMA][44] = 76, + [0][1][RTW89_CHILE][44] = 42, + [0][1][RTW89_UKRAINE][44] = 16, + [0][1][RTW89_MEXICO][44] = 76, + [0][1][RTW89_CN][44] = 50, + [0][1][RTW89_QATAR][44] = 16, + [0][1][RTW89_FCC][46] = 76, + [0][1][RTW89_ETSI][46] = 16, + [0][1][RTW89_MKK][46] = 127, + [0][1][RTW89_IC][46] = 76, + [0][1][RTW89_KCC][46] = 32, + [0][1][RTW89_ACMA][46] = 76, + [0][1][RTW89_CHILE][46] = 42, + [0][1][RTW89_UKRAINE][46] = 16, + [0][1][RTW89_MEXICO][46] = 76, + [0][1][RTW89_CN][46] = 50, + [0][1][RTW89_QATAR][46] = 16, + [1][0][RTW89_FCC][0] = 62, + [1][0][RTW89_ETSI][0] = 36, + [1][0][RTW89_MKK][0] = 36, + [1][0][RTW89_IC][0] = 34, + [1][0][RTW89_KCC][0] = 58, + [1][0][RTW89_ACMA][0] = 36, + [1][0][RTW89_CHILE][0] = 30, + [1][0][RTW89_UKRAINE][0] = 36, + [1][0][RTW89_MEXICO][0] = 62, + [1][0][RTW89_CN][0] = 36, + [1][0][RTW89_QATAR][0] = 36, + [1][0][RTW89_FCC][2] = 62, + [1][0][RTW89_ETSI][2] = 36, + [1][0][RTW89_MKK][2] = 36, + [1][0][RTW89_IC][2] = 34, + [1][0][RTW89_KCC][2] = 58, + [1][0][RTW89_ACMA][2] = 36, + [1][0][RTW89_CHILE][2] = 30, + [1][0][RTW89_UKRAINE][2] = 36, + [1][0][RTW89_MEXICO][2] = 62, + [1][0][RTW89_CN][2] = 36, + [1][0][RTW89_QATAR][2] = 36, + [1][0][RTW89_FCC][4] = 62, + [1][0][RTW89_ETSI][4] = 36, + [1][0][RTW89_MKK][4] = 36, + [1][0][RTW89_IC][4] = 34, + [1][0][RTW89_KCC][4] = 58, + [1][0][RTW89_ACMA][4] = 36, + [1][0][RTW89_CHILE][4] = 30, + [1][0][RTW89_UKRAINE][4] = 36, + [1][0][RTW89_MEXICO][4] = 62, + [1][0][RTW89_CN][4] = 36, + [1][0][RTW89_QATAR][4] = 36, + [1][0][RTW89_FCC][6] = 62, + [1][0][RTW89_ETSI][6] = 36, + [1][0][RTW89_MKK][6] = 36, + [1][0][RTW89_IC][6] = 34, + [1][0][RTW89_KCC][6] = 32, + [1][0][RTW89_ACMA][6] = 36, + [1][0][RTW89_CHILE][6] = 30, + [1][0][RTW89_UKRAINE][6] = 36, + [1][0][RTW89_MEXICO][6] = 62, + [1][0][RTW89_CN][6] = 36, + [1][0][RTW89_QATAR][6] = 36, + [1][0][RTW89_FCC][8] = 62, + [1][0][RTW89_ETSI][8] = 36, + [1][0][RTW89_MKK][8] = 36, + [1][0][RTW89_IC][8] = 62, + [1][0][RTW89_KCC][8] = 58, + [1][0][RTW89_ACMA][8] = 36, + [1][0][RTW89_CHILE][8] = 54, + [1][0][RTW89_UKRAINE][8] = 36, + [1][0][RTW89_MEXICO][8] = 62, + [1][0][RTW89_CN][8] = 36, + [1][0][RTW89_QATAR][8] = 36, + [1][0][RTW89_FCC][10] = 62, + [1][0][RTW89_ETSI][10] = 36, + [1][0][RTW89_MKK][10] = 36, + [1][0][RTW89_IC][10] = 62, + [1][0][RTW89_KCC][10] = 58, + [1][0][RTW89_ACMA][10] = 36, + [1][0][RTW89_CHILE][10] = 54, + [1][0][RTW89_UKRAINE][10] = 36, + [1][0][RTW89_MEXICO][10] = 62, + [1][0][RTW89_CN][10] = 36, + [1][0][RTW89_QATAR][10] = 36, + [1][0][RTW89_FCC][12] = 62, + [1][0][RTW89_ETSI][12] = 36, + [1][0][RTW89_MKK][12] = 36, + [1][0][RTW89_IC][12] = 62, + [1][0][RTW89_KCC][12] = 58, + [1][0][RTW89_ACMA][12] = 36, + [1][0][RTW89_CHILE][12] = 54, + [1][0][RTW89_UKRAINE][12] = 36, + [1][0][RTW89_MEXICO][12] = 62, + [1][0][RTW89_CN][12] = 36, + [1][0][RTW89_QATAR][12] = 36, + [1][0][RTW89_FCC][14] = 62, + [1][0][RTW89_ETSI][14] = 36, + [1][0][RTW89_MKK][14] = 36, + [1][0][RTW89_IC][14] = 62, + [1][0][RTW89_KCC][14] = 58, + [1][0][RTW89_ACMA][14] = 36, + [1][0][RTW89_CHILE][14] = 54, + [1][0][RTW89_UKRAINE][14] = 36, + [1][0][RTW89_MEXICO][14] = 62, + [1][0][RTW89_CN][14] = 36, + [1][0][RTW89_QATAR][14] = 36, + [1][0][RTW89_FCC][15] = 62, + [1][0][RTW89_ETSI][15] = 36, + [1][0][RTW89_MKK][15] = 58, + [1][0][RTW89_IC][15] = 62, + 
[1][0][RTW89_KCC][15] = 58, + [1][0][RTW89_ACMA][15] = 36, + [1][0][RTW89_CHILE][15] = 54, + [1][0][RTW89_UKRAINE][15] = 36, + [1][0][RTW89_MEXICO][15] = 62, + [1][0][RTW89_CN][15] = 127, + [1][0][RTW89_QATAR][15] = 36, + [1][0][RTW89_FCC][17] = 62, + [1][0][RTW89_ETSI][17] = 36, + [1][0][RTW89_MKK][17] = 58, + [1][0][RTW89_IC][17] = 62, + [1][0][RTW89_KCC][17] = 58, + [1][0][RTW89_ACMA][17] = 36, + [1][0][RTW89_CHILE][17] = 54, + [1][0][RTW89_UKRAINE][17] = 36, + [1][0][RTW89_MEXICO][17] = 62, + [1][0][RTW89_CN][17] = 127, + [1][0][RTW89_QATAR][17] = 36, + [1][0][RTW89_FCC][19] = 62, + [1][0][RTW89_ETSI][19] = 36, + [1][0][RTW89_MKK][19] = 58, + [1][0][RTW89_IC][19] = 62, + [1][0][RTW89_KCC][19] = 58, + [1][0][RTW89_ACMA][19] = 36, + [1][0][RTW89_CHILE][19] = 54, + [1][0][RTW89_UKRAINE][19] = 36, + [1][0][RTW89_MEXICO][19] = 62, + [1][0][RTW89_CN][19] = 127, + [1][0][RTW89_QATAR][19] = 36, + [1][0][RTW89_FCC][21] = 62, + [1][0][RTW89_ETSI][21] = 36, + [1][0][RTW89_MKK][21] = 58, + [1][0][RTW89_IC][21] = 62, + [1][0][RTW89_KCC][21] = 58, + [1][0][RTW89_ACMA][21] = 36, + [1][0][RTW89_CHILE][21] = 54, + [1][0][RTW89_UKRAINE][21] = 36, + [1][0][RTW89_MEXICO][21] = 62, + [1][0][RTW89_CN][21] = 127, + [1][0][RTW89_QATAR][21] = 36, + [1][0][RTW89_FCC][23] = 62, + [1][0][RTW89_ETSI][23] = 36, + [1][0][RTW89_MKK][23] = 58, + [1][0][RTW89_IC][23] = 62, + [1][0][RTW89_KCC][23] = 58, + [1][0][RTW89_ACMA][23] = 36, + [1][0][RTW89_CHILE][23] = 54, + [1][0][RTW89_UKRAINE][23] = 36, + [1][0][RTW89_MEXICO][23] = 62, + [1][0][RTW89_CN][23] = 127, + [1][0][RTW89_QATAR][23] = 36, + [1][0][RTW89_FCC][25] = 62, + [1][0][RTW89_ETSI][25] = 36, + [1][0][RTW89_MKK][25] = 58, + [1][0][RTW89_IC][25] = 127, + [1][0][RTW89_KCC][25] = 58, + [1][0][RTW89_ACMA][25] = 127, + [1][0][RTW89_CHILE][25] = 54, + [1][0][RTW89_UKRAINE][25] = 36, + [1][0][RTW89_MEXICO][25] = 62, + [1][0][RTW89_CN][25] = 127, + [1][0][RTW89_QATAR][25] = 36, + [1][0][RTW89_FCC][27] = 62, + [1][0][RTW89_ETSI][27] = 36, + [1][0][RTW89_MKK][27] = 58, + [1][0][RTW89_IC][27] = 127, + [1][0][RTW89_KCC][27] = 58, + [1][0][RTW89_ACMA][27] = 127, + [1][0][RTW89_CHILE][27] = 54, + [1][0][RTW89_UKRAINE][27] = 36, + [1][0][RTW89_MEXICO][27] = 62, + [1][0][RTW89_CN][27] = 127, + [1][0][RTW89_QATAR][27] = 36, + [1][0][RTW89_FCC][29] = 62, + [1][0][RTW89_ETSI][29] = 36, + [1][0][RTW89_MKK][29] = 58, + [1][0][RTW89_IC][29] = 127, + [1][0][RTW89_KCC][29] = 58, + [1][0][RTW89_ACMA][29] = 127, + [1][0][RTW89_CHILE][29] = 54, + [1][0][RTW89_UKRAINE][29] = 36, + [1][0][RTW89_MEXICO][29] = 62, + [1][0][RTW89_CN][29] = 127, + [1][0][RTW89_QATAR][29] = 36, + [1][0][RTW89_FCC][31] = 62, + [1][0][RTW89_ETSI][31] = 36, + [1][0][RTW89_MKK][31] = 58, + [1][0][RTW89_IC][31] = 62, + [1][0][RTW89_KCC][31] = 58, + [1][0][RTW89_ACMA][31] = 36, + [1][0][RTW89_CHILE][31] = 54, + [1][0][RTW89_UKRAINE][31] = 36, + [1][0][RTW89_MEXICO][31] = 62, + [1][0][RTW89_CN][31] = 127, + [1][0][RTW89_QATAR][31] = 36, + [1][0][RTW89_FCC][33] = 62, + [1][0][RTW89_ETSI][33] = 36, + [1][0][RTW89_MKK][33] = 58, + [1][0][RTW89_IC][33] = 62, + [1][0][RTW89_KCC][33] = 58, + [1][0][RTW89_ACMA][33] = 36, + [1][0][RTW89_CHILE][33] = 54, + [1][0][RTW89_UKRAINE][33] = 36, + [1][0][RTW89_MEXICO][33] = 62, + [1][0][RTW89_CN][33] = 127, + [1][0][RTW89_QATAR][33] = 36, + [1][0][RTW89_FCC][35] = 62, + [1][0][RTW89_ETSI][35] = 36, + [1][0][RTW89_MKK][35] = 58, + [1][0][RTW89_IC][35] = 62, + [1][0][RTW89_KCC][35] = 58, + [1][0][RTW89_ACMA][35] = 36, + [1][0][RTW89_CHILE][35] = 54, + [1][0][RTW89_UKRAINE][35] = 36, + 
[1][0][RTW89_MEXICO][35] = 62, + [1][0][RTW89_CN][35] = 127, + [1][0][RTW89_QATAR][35] = 36, + [1][0][RTW89_FCC][37] = 62, + [1][0][RTW89_ETSI][37] = 127, + [1][0][RTW89_MKK][37] = 58, + [1][0][RTW89_IC][37] = 62, + [1][0][RTW89_KCC][37] = 58, + [1][0][RTW89_ACMA][37] = 62, + [1][0][RTW89_CHILE][37] = 54, + [1][0][RTW89_UKRAINE][37] = 127, + [1][0][RTW89_MEXICO][37] = 62, + [1][0][RTW89_CN][37] = 127, + [1][0][RTW89_QATAR][37] = 127, + [1][0][RTW89_FCC][38] = 76, + [1][0][RTW89_ETSI][38] = 28, + [1][0][RTW89_MKK][38] = 127, + [1][0][RTW89_IC][38] = 76, + [1][0][RTW89_KCC][38] = 58, + [1][0][RTW89_ACMA][38] = 76, + [1][0][RTW89_CHILE][38] = 54, + [1][0][RTW89_UKRAINE][38] = 28, + [1][0][RTW89_MEXICO][38] = 76, + [1][0][RTW89_CN][38] = 74, + [1][0][RTW89_QATAR][38] = 28, + [1][0][RTW89_FCC][40] = 76, + [1][0][RTW89_ETSI][40] = 28, + [1][0][RTW89_MKK][40] = 127, + [1][0][RTW89_IC][40] = 76, + [1][0][RTW89_KCC][40] = 58, + [1][0][RTW89_ACMA][40] = 76, + [1][0][RTW89_CHILE][40] = 54, + [1][0][RTW89_UKRAINE][40] = 28, + [1][0][RTW89_MEXICO][40] = 76, + [1][0][RTW89_CN][40] = 74, + [1][0][RTW89_QATAR][40] = 28, + [1][0][RTW89_FCC][42] = 76, + [1][0][RTW89_ETSI][42] = 28, + [1][0][RTW89_MKK][42] = 127, + [1][0][RTW89_IC][42] = 76, + [1][0][RTW89_KCC][42] = 58, + [1][0][RTW89_ACMA][42] = 76, + [1][0][RTW89_CHILE][42] = 54, + [1][0][RTW89_UKRAINE][42] = 28, + [1][0][RTW89_MEXICO][42] = 76, + [1][0][RTW89_CN][42] = 74, + [1][0][RTW89_QATAR][42] = 28, + [1][0][RTW89_FCC][44] = 76, + [1][0][RTW89_ETSI][44] = 28, + [1][0][RTW89_MKK][44] = 127, + [1][0][RTW89_IC][44] = 76, + [1][0][RTW89_KCC][44] = 58, + [1][0][RTW89_ACMA][44] = 76, + [1][0][RTW89_CHILE][44] = 54, + [1][0][RTW89_UKRAINE][44] = 28, + [1][0][RTW89_MEXICO][44] = 76, + [1][0][RTW89_CN][44] = 74, + [1][0][RTW89_QATAR][44] = 28, + [1][0][RTW89_FCC][46] = 76, + [1][0][RTW89_ETSI][46] = 28, + [1][0][RTW89_MKK][46] = 127, + [1][0][RTW89_IC][46] = 76, + [1][0][RTW89_KCC][46] = 58, + [1][0][RTW89_ACMA][46] = 76, + [1][0][RTW89_CHILE][46] = 54, + [1][0][RTW89_UKRAINE][46] = 28, + [1][0][RTW89_MEXICO][46] = 76, + [1][0][RTW89_CN][46] = 74, + [1][0][RTW89_QATAR][46] = 28, + [1][1][RTW89_FCC][0] = 46, + [1][1][RTW89_ETSI][0] = 22, + [1][1][RTW89_MKK][0] = 24, + [1][1][RTW89_IC][0] = 18, + [1][1][RTW89_KCC][0] = 44, + [1][1][RTW89_ACMA][0] = 22, + [1][1][RTW89_CHILE][0] = 18, + [1][1][RTW89_UKRAINE][0] = 22, + [1][1][RTW89_MEXICO][0] = 46, + [1][1][RTW89_CN][0] = 22, + [1][1][RTW89_QATAR][0] = 22, + [1][1][RTW89_FCC][2] = 46, + [1][1][RTW89_ETSI][2] = 22, + [1][1][RTW89_MKK][2] = 24, + [1][1][RTW89_IC][2] = 18, + [1][1][RTW89_KCC][2] = 44, + [1][1][RTW89_ACMA][2] = 22, + [1][1][RTW89_CHILE][2] = 18, + [1][1][RTW89_UKRAINE][2] = 22, + [1][1][RTW89_MEXICO][2] = 46, + [1][1][RTW89_CN][2] = 22, + [1][1][RTW89_QATAR][2] = 22, + [1][1][RTW89_FCC][4] = 46, + [1][1][RTW89_ETSI][4] = 22, + [1][1][RTW89_MKK][4] = 24, + [1][1][RTW89_IC][4] = 18, + [1][1][RTW89_KCC][4] = 44, + [1][1][RTW89_ACMA][4] = 22, + [1][1][RTW89_CHILE][4] = 18, + [1][1][RTW89_UKRAINE][4] = 22, + [1][1][RTW89_MEXICO][4] = 46, + [1][1][RTW89_CN][4] = 22, + [1][1][RTW89_QATAR][4] = 22, + [1][1][RTW89_FCC][6] = 46, + [1][1][RTW89_ETSI][6] = 22, + [1][1][RTW89_MKK][6] = 24, + [1][1][RTW89_IC][6] = 18, + [1][1][RTW89_KCC][6] = 16, + [1][1][RTW89_ACMA][6] = 22, + [1][1][RTW89_CHILE][6] = 18, + [1][1][RTW89_UKRAINE][6] = 22, + [1][1][RTW89_MEXICO][6] = 46, + [1][1][RTW89_CN][6] = 22, + [1][1][RTW89_QATAR][6] = 22, + [1][1][RTW89_FCC][8] = 46, + [1][1][RTW89_ETSI][8] = 22, + [1][1][RTW89_MKK][8] = 
24, + [1][1][RTW89_IC][8] = 46, + [1][1][RTW89_KCC][8] = 44, + [1][1][RTW89_ACMA][8] = 22, + [1][1][RTW89_CHILE][8] = 42, + [1][1][RTW89_UKRAINE][8] = 22, + [1][1][RTW89_MEXICO][8] = 46, + [1][1][RTW89_CN][8] = 22, + [1][1][RTW89_QATAR][8] = 22, + [1][1][RTW89_FCC][10] = 46, + [1][1][RTW89_ETSI][10] = 22, + [1][1][RTW89_MKK][10] = 24, + [1][1][RTW89_IC][10] = 46, + [1][1][RTW89_KCC][10] = 44, + [1][1][RTW89_ACMA][10] = 22, + [1][1][RTW89_CHILE][10] = 42, + [1][1][RTW89_UKRAINE][10] = 22, + [1][1][RTW89_MEXICO][10] = 46, + [1][1][RTW89_CN][10] = 22, + [1][1][RTW89_QATAR][10] = 22, + [1][1][RTW89_FCC][12] = 46, + [1][1][RTW89_ETSI][12] = 22, + [1][1][RTW89_MKK][12] = 24, + [1][1][RTW89_IC][12] = 46, + [1][1][RTW89_KCC][12] = 44, + [1][1][RTW89_ACMA][12] = 22, + [1][1][RTW89_CHILE][12] = 42, + [1][1][RTW89_UKRAINE][12] = 22, + [1][1][RTW89_MEXICO][12] = 46, + [1][1][RTW89_CN][12] = 22, + [1][1][RTW89_QATAR][12] = 22, + [1][1][RTW89_FCC][14] = 46, + [1][1][RTW89_ETSI][14] = 22, + [1][1][RTW89_MKK][14] = 24, + [1][1][RTW89_IC][14] = 46, + [1][1][RTW89_KCC][14] = 44, + [1][1][RTW89_ACMA][14] = 22, + [1][1][RTW89_CHILE][14] = 42, + [1][1][RTW89_UKRAINE][14] = 22, + [1][1][RTW89_MEXICO][14] = 46, + [1][1][RTW89_CN][14] = 22, + [1][1][RTW89_QATAR][14] = 22, + [1][1][RTW89_FCC][15] = 46, + [1][1][RTW89_ETSI][15] = 22, + [1][1][RTW89_MKK][15] = 46, + [1][1][RTW89_IC][15] = 46, + [1][1][RTW89_KCC][15] = 44, + [1][1][RTW89_ACMA][15] = 22, + [1][1][RTW89_CHILE][15] = 42, + [1][1][RTW89_UKRAINE][15] = 22, + [1][1][RTW89_MEXICO][15] = 46, + [1][1][RTW89_CN][15] = 127, + [1][1][RTW89_QATAR][15] = 22, + [1][1][RTW89_FCC][17] = 46, + [1][1][RTW89_ETSI][17] = 22, + [1][1][RTW89_MKK][17] = 46, + [1][1][RTW89_IC][17] = 46, + [1][1][RTW89_KCC][17] = 44, + [1][1][RTW89_ACMA][17] = 22, + [1][1][RTW89_CHILE][17] = 42, + [1][1][RTW89_UKRAINE][17] = 22, + [1][1][RTW89_MEXICO][17] = 46, + [1][1][RTW89_CN][17] = 127, + [1][1][RTW89_QATAR][17] = 22, + [1][1][RTW89_FCC][19] = 46, + [1][1][RTW89_ETSI][19] = 22, + [1][1][RTW89_MKK][19] = 46, + [1][1][RTW89_IC][19] = 46, + [1][1][RTW89_KCC][19] = 44, + [1][1][RTW89_ACMA][19] = 22, + [1][1][RTW89_CHILE][19] = 42, + [1][1][RTW89_UKRAINE][19] = 22, + [1][1][RTW89_MEXICO][19] = 46, + [1][1][RTW89_CN][19] = 127, + [1][1][RTW89_QATAR][19] = 22, + [1][1][RTW89_FCC][21] = 46, + [1][1][RTW89_ETSI][21] = 22, + [1][1][RTW89_MKK][21] = 46, + [1][1][RTW89_IC][21] = 46, + [1][1][RTW89_KCC][21] = 44, + [1][1][RTW89_ACMA][21] = 22, + [1][1][RTW89_CHILE][21] = 42, + [1][1][RTW89_UKRAINE][21] = 22, + [1][1][RTW89_MEXICO][21] = 46, + [1][1][RTW89_CN][21] = 127, + [1][1][RTW89_QATAR][21] = 22, + [1][1][RTW89_FCC][23] = 46, + [1][1][RTW89_ETSI][23] = 22, + [1][1][RTW89_MKK][23] = 46, + [1][1][RTW89_IC][23] = 46, + [1][1][RTW89_KCC][23] = 44, + [1][1][RTW89_ACMA][23] = 22, + [1][1][RTW89_CHILE][23] = 42, + [1][1][RTW89_UKRAINE][23] = 22, + [1][1][RTW89_MEXICO][23] = 46, + [1][1][RTW89_CN][23] = 127, + [1][1][RTW89_QATAR][23] = 22, + [1][1][RTW89_FCC][25] = 46, + [1][1][RTW89_ETSI][25] = 22, + [1][1][RTW89_MKK][25] = 46, + [1][1][RTW89_IC][25] = 127, + [1][1][RTW89_KCC][25] = 44, + [1][1][RTW89_ACMA][25] = 127, + [1][1][RTW89_CHILE][25] = 42, + [1][1][RTW89_UKRAINE][25] = 22, + [1][1][RTW89_MEXICO][25] = 46, + [1][1][RTW89_CN][25] = 127, + [1][1][RTW89_QATAR][25] = 22, + [1][1][RTW89_FCC][27] = 46, + [1][1][RTW89_ETSI][27] = 22, + [1][1][RTW89_MKK][27] = 46, + [1][1][RTW89_IC][27] = 127, + [1][1][RTW89_KCC][27] = 44, + [1][1][RTW89_ACMA][27] = 127, + [1][1][RTW89_CHILE][27] = 42, + 
[1][1][RTW89_UKRAINE][27] = 22, + [1][1][RTW89_MEXICO][27] = 46, + [1][1][RTW89_CN][27] = 127, + [1][1][RTW89_QATAR][27] = 22, + [1][1][RTW89_FCC][29] = 46, + [1][1][RTW89_ETSI][29] = 22, + [1][1][RTW89_MKK][29] = 46, + [1][1][RTW89_IC][29] = 127, + [1][1][RTW89_KCC][29] = 44, + [1][1][RTW89_ACMA][29] = 127, + [1][1][RTW89_CHILE][29] = 42, + [1][1][RTW89_UKRAINE][29] = 22, + [1][1][RTW89_MEXICO][29] = 46, + [1][1][RTW89_CN][29] = 127, + [1][1][RTW89_QATAR][29] = 22, + [1][1][RTW89_FCC][31] = 46, + [1][1][RTW89_ETSI][31] = 22, + [1][1][RTW89_MKK][31] = 46, + [1][1][RTW89_IC][31] = 46, + [1][1][RTW89_KCC][31] = 44, + [1][1][RTW89_ACMA][31] = 22, + [1][1][RTW89_CHILE][31] = 42, + [1][1][RTW89_UKRAINE][31] = 22, + [1][1][RTW89_MEXICO][31] = 46, + [1][1][RTW89_CN][31] = 127, + [1][1][RTW89_QATAR][31] = 22, + [1][1][RTW89_FCC][33] = 46, + [1][1][RTW89_ETSI][33] = 22, + [1][1][RTW89_MKK][33] = 46, + [1][1][RTW89_IC][33] = 46, + [1][1][RTW89_KCC][33] = 44, + [1][1][RTW89_ACMA][33] = 22, + [1][1][RTW89_CHILE][33] = 42, + [1][1][RTW89_UKRAINE][33] = 22, + [1][1][RTW89_MEXICO][33] = 46, + [1][1][RTW89_CN][33] = 127, + [1][1][RTW89_QATAR][33] = 22, + [1][1][RTW89_FCC][35] = 46, + [1][1][RTW89_ETSI][35] = 22, + [1][1][RTW89_MKK][35] = 46, + [1][1][RTW89_IC][35] = 46, + [1][1][RTW89_KCC][35] = 44, + [1][1][RTW89_ACMA][35] = 22, + [1][1][RTW89_CHILE][35] = 42, + [1][1][RTW89_UKRAINE][35] = 22, + [1][1][RTW89_MEXICO][35] = 46, + [1][1][RTW89_CN][35] = 127, + [1][1][RTW89_QATAR][35] = 22, + [1][1][RTW89_FCC][37] = 46, + [1][1][RTW89_ETSI][37] = 127, + [1][1][RTW89_MKK][37] = 46, + [1][1][RTW89_IC][37] = 46, + [1][1][RTW89_KCC][37] = 44, + [1][1][RTW89_ACMA][37] = 50, + [1][1][RTW89_CHILE][37] = 42, + [1][1][RTW89_UKRAINE][37] = 127, + [1][1][RTW89_MEXICO][37] = 46, + [1][1][RTW89_CN][37] = 127, + [1][1][RTW89_QATAR][37] = 127, + [1][1][RTW89_FCC][38] = 74, + [1][1][RTW89_ETSI][38] = 16, + [1][1][RTW89_MKK][38] = 127, + [1][1][RTW89_IC][38] = 74, + [1][1][RTW89_KCC][38] = 44, + [1][1][RTW89_ACMA][38] = 76, + [1][1][RTW89_CHILE][38] = 42, + [1][1][RTW89_UKRAINE][38] = 16, + [1][1][RTW89_MEXICO][38] = 74, + [1][1][RTW89_CN][38] = 62, + [1][1][RTW89_QATAR][38] = 16, + [1][1][RTW89_FCC][40] = 76, + [1][1][RTW89_ETSI][40] = 16, + [1][1][RTW89_MKK][40] = 127, + [1][1][RTW89_IC][40] = 76, + [1][1][RTW89_KCC][40] = 44, + [1][1][RTW89_ACMA][40] = 76, + [1][1][RTW89_CHILE][40] = 42, + [1][1][RTW89_UKRAINE][40] = 16, + [1][1][RTW89_MEXICO][40] = 76, + [1][1][RTW89_CN][40] = 62, + [1][1][RTW89_QATAR][40] = 16, + [1][1][RTW89_FCC][42] = 76, + [1][1][RTW89_ETSI][42] = 16, + [1][1][RTW89_MKK][42] = 127, + [1][1][RTW89_IC][42] = 76, + [1][1][RTW89_KCC][42] = 44, + [1][1][RTW89_ACMA][42] = 76, + [1][1][RTW89_CHILE][42] = 42, + [1][1][RTW89_UKRAINE][42] = 16, + [1][1][RTW89_MEXICO][42] = 76, + [1][1][RTW89_CN][42] = 62, + [1][1][RTW89_QATAR][42] = 16, + [1][1][RTW89_FCC][44] = 76, + [1][1][RTW89_ETSI][44] = 16, + [1][1][RTW89_MKK][44] = 127, + [1][1][RTW89_IC][44] = 76, + [1][1][RTW89_KCC][44] = 44, + [1][1][RTW89_ACMA][44] = 76, + [1][1][RTW89_CHILE][44] = 42, + [1][1][RTW89_UKRAINE][44] = 16, + [1][1][RTW89_MEXICO][44] = 76, + [1][1][RTW89_CN][44] = 62, + [1][1][RTW89_QATAR][44] = 16, + [1][1][RTW89_FCC][46] = 76, + [1][1][RTW89_ETSI][46] = 16, + [1][1][RTW89_MKK][46] = 127, + [1][1][RTW89_IC][46] = 76, + [1][1][RTW89_KCC][46] = 44, + [1][1][RTW89_ACMA][46] = 76, + [1][1][RTW89_CHILE][46] = 42, + [1][1][RTW89_UKRAINE][46] = 16, + [1][1][RTW89_MEXICO][46] = 76, + [1][1][RTW89_CN][46] = 62, + [1][1][RTW89_QATAR][46] = 16, + 
[2][0][RTW89_FCC][0] = 74, + [2][0][RTW89_ETSI][0] = 46, + [2][0][RTW89_MKK][0] = 50, + [2][0][RTW89_IC][0] = 46, + [2][0][RTW89_KCC][0] = 70, + [2][0][RTW89_ACMA][0] = 46, + [2][0][RTW89_CHILE][0] = 30, + [2][0][RTW89_UKRAINE][0] = 46, + [2][0][RTW89_MEXICO][0] = 62, + [2][0][RTW89_CN][0] = 46, + [2][0][RTW89_QATAR][0] = 46, + [2][0][RTW89_FCC][2] = 74, + [2][0][RTW89_ETSI][2] = 46, + [2][0][RTW89_MKK][2] = 50, + [2][0][RTW89_IC][2] = 46, + [2][0][RTW89_KCC][2] = 70, + [2][0][RTW89_ACMA][2] = 46, + [2][0][RTW89_CHILE][2] = 30, + [2][0][RTW89_UKRAINE][2] = 46, + [2][0][RTW89_MEXICO][2] = 62, + [2][0][RTW89_CN][2] = 46, + [2][0][RTW89_QATAR][2] = 46, + [2][0][RTW89_FCC][4] = 74, + [2][0][RTW89_ETSI][4] = 46, + [2][0][RTW89_MKK][4] = 50, + [2][0][RTW89_IC][4] = 46, + [2][0][RTW89_KCC][4] = 70, + [2][0][RTW89_ACMA][4] = 46, + [2][0][RTW89_CHILE][4] = 30, + [2][0][RTW89_UKRAINE][4] = 46, + [2][0][RTW89_MEXICO][4] = 62, + [2][0][RTW89_CN][4] = 46, + [2][0][RTW89_QATAR][4] = 46, + [2][0][RTW89_FCC][6] = 74, + [2][0][RTW89_ETSI][6] = 46, + [2][0][RTW89_MKK][6] = 50, + [2][0][RTW89_IC][6] = 46, + [2][0][RTW89_KCC][6] = 44, + [2][0][RTW89_ACMA][6] = 46, + [2][0][RTW89_CHILE][6] = 30, + [2][0][RTW89_UKRAINE][6] = 46, + [2][0][RTW89_MEXICO][6] = 62, + [2][0][RTW89_CN][6] = 46, + [2][0][RTW89_QATAR][6] = 46, + [2][0][RTW89_FCC][8] = 74, + [2][0][RTW89_ETSI][8] = 46, + [2][0][RTW89_MKK][8] = 50, + [2][0][RTW89_IC][8] = 66, + [2][0][RTW89_KCC][8] = 70, + [2][0][RTW89_ACMA][8] = 46, + [2][0][RTW89_CHILE][8] = 54, + [2][0][RTW89_UKRAINE][8] = 46, + [2][0][RTW89_MEXICO][8] = 74, + [2][0][RTW89_CN][8] = 46, + [2][0][RTW89_QATAR][8] = 46, + [2][0][RTW89_FCC][10] = 74, + [2][0][RTW89_ETSI][10] = 46, + [2][0][RTW89_MKK][10] = 50, + [2][0][RTW89_IC][10] = 66, + [2][0][RTW89_KCC][10] = 70, + [2][0][RTW89_ACMA][10] = 46, + [2][0][RTW89_CHILE][10] = 54, + [2][0][RTW89_UKRAINE][10] = 46, + [2][0][RTW89_MEXICO][10] = 74, + [2][0][RTW89_CN][10] = 46, + [2][0][RTW89_QATAR][10] = 46, + [2][0][RTW89_FCC][12] = 74, + [2][0][RTW89_ETSI][12] = 46, + [2][0][RTW89_MKK][12] = 50, + [2][0][RTW89_IC][12] = 66, + [2][0][RTW89_KCC][12] = 70, + [2][0][RTW89_ACMA][12] = 46, + [2][0][RTW89_CHILE][12] = 54, + [2][0][RTW89_UKRAINE][12] = 46, + [2][0][RTW89_MEXICO][12] = 74, + [2][0][RTW89_CN][12] = 46, + [2][0][RTW89_QATAR][12] = 46, + [2][0][RTW89_FCC][14] = 74, + [2][0][RTW89_ETSI][14] = 46, + [2][0][RTW89_MKK][14] = 50, + [2][0][RTW89_IC][14] = 66, + [2][0][RTW89_KCC][14] = 70, + [2][0][RTW89_ACMA][14] = 46, + [2][0][RTW89_CHILE][14] = 54, + [2][0][RTW89_UKRAINE][14] = 46, + [2][0][RTW89_MEXICO][14] = 74, + [2][0][RTW89_CN][14] = 46, + [2][0][RTW89_QATAR][14] = 46, + [2][0][RTW89_FCC][15] = 74, + [2][0][RTW89_ETSI][15] = 46, + [2][0][RTW89_MKK][15] = 70, + [2][0][RTW89_IC][15] = 74, + [2][0][RTW89_KCC][15] = 70, + [2][0][RTW89_ACMA][15] = 46, + [2][0][RTW89_CHILE][15] = 54, + [2][0][RTW89_UKRAINE][15] = 46, + [2][0][RTW89_MEXICO][15] = 74, + [2][0][RTW89_CN][15] = 127, + [2][0][RTW89_QATAR][15] = 46, + [2][0][RTW89_FCC][17] = 74, + [2][0][RTW89_ETSI][17] = 46, + [2][0][RTW89_MKK][17] = 70, + [2][0][RTW89_IC][17] = 74, + [2][0][RTW89_KCC][17] = 70, + [2][0][RTW89_ACMA][17] = 46, + [2][0][RTW89_CHILE][17] = 54, + [2][0][RTW89_UKRAINE][17] = 46, + [2][0][RTW89_MEXICO][17] = 74, + [2][0][RTW89_CN][17] = 127, + [2][0][RTW89_QATAR][17] = 46, + [2][0][RTW89_FCC][19] = 74, + [2][0][RTW89_ETSI][19] = 46, + [2][0][RTW89_MKK][19] = 70, + [2][0][RTW89_IC][19] = 74, + [2][0][RTW89_KCC][19] = 70, + [2][0][RTW89_ACMA][19] = 46, + 
[2][0][RTW89_CHILE][19] = 54, + [2][0][RTW89_UKRAINE][19] = 46, + [2][0][RTW89_MEXICO][19] = 74, + [2][0][RTW89_CN][19] = 127, + [2][0][RTW89_QATAR][19] = 46, + [2][0][RTW89_FCC][21] = 74, + [2][0][RTW89_ETSI][21] = 46, + [2][0][RTW89_MKK][21] = 70, + [2][0][RTW89_IC][21] = 74, + [2][0][RTW89_KCC][21] = 70, + [2][0][RTW89_ACMA][21] = 46, + [2][0][RTW89_CHILE][21] = 54, + [2][0][RTW89_UKRAINE][21] = 46, + [2][0][RTW89_MEXICO][21] = 74, + [2][0][RTW89_CN][21] = 127, + [2][0][RTW89_QATAR][21] = 46, + [2][0][RTW89_FCC][23] = 74, + [2][0][RTW89_ETSI][23] = 46, + [2][0][RTW89_MKK][23] = 70, + [2][0][RTW89_IC][23] = 74, + [2][0][RTW89_KCC][23] = 70, + [2][0][RTW89_ACMA][23] = 46, + [2][0][RTW89_CHILE][23] = 54, + [2][0][RTW89_UKRAINE][23] = 46, + [2][0][RTW89_MEXICO][23] = 74, + [2][0][RTW89_CN][23] = 127, + [2][0][RTW89_QATAR][23] = 46, + [2][0][RTW89_FCC][25] = 74, + [2][0][RTW89_ETSI][25] = 46, + [2][0][RTW89_MKK][25] = 70, + [2][0][RTW89_IC][25] = 127, + [2][0][RTW89_KCC][25] = 70, + [2][0][RTW89_ACMA][25] = 127, + [2][0][RTW89_CHILE][25] = 54, + [2][0][RTW89_UKRAINE][25] = 46, + [2][0][RTW89_MEXICO][25] = 74, + [2][0][RTW89_CN][25] = 127, + [2][0][RTW89_QATAR][25] = 46, + [2][0][RTW89_FCC][27] = 74, + [2][0][RTW89_ETSI][27] = 46, + [2][0][RTW89_MKK][27] = 70, + [2][0][RTW89_IC][27] = 127, + [2][0][RTW89_KCC][27] = 70, + [2][0][RTW89_ACMA][27] = 127, + [2][0][RTW89_CHILE][27] = 54, + [2][0][RTW89_UKRAINE][27] = 46, + [2][0][RTW89_MEXICO][27] = 74, + [2][0][RTW89_CN][27] = 127, + [2][0][RTW89_QATAR][27] = 46, + [2][0][RTW89_FCC][29] = 74, + [2][0][RTW89_ETSI][29] = 46, + [2][0][RTW89_MKK][29] = 70, + [2][0][RTW89_IC][29] = 127, + [2][0][RTW89_KCC][29] = 70, + [2][0][RTW89_ACMA][29] = 127, + [2][0][RTW89_CHILE][29] = 54, + [2][0][RTW89_UKRAINE][29] = 46, + [2][0][RTW89_MEXICO][29] = 74, + [2][0][RTW89_CN][29] = 127, + [2][0][RTW89_QATAR][29] = 46, + [2][0][RTW89_FCC][31] = 74, + [2][0][RTW89_ETSI][31] = 46, + [2][0][RTW89_MKK][31] = 70, + [2][0][RTW89_IC][31] = 74, + [2][0][RTW89_KCC][31] = 70, + [2][0][RTW89_ACMA][31] = 46, + [2][0][RTW89_CHILE][31] = 54, + [2][0][RTW89_UKRAINE][31] = 46, + [2][0][RTW89_MEXICO][31] = 74, + [2][0][RTW89_CN][31] = 127, + [2][0][RTW89_QATAR][31] = 46, + [2][0][RTW89_FCC][33] = 74, + [2][0][RTW89_ETSI][33] = 46, + [2][0][RTW89_MKK][33] = 70, + [2][0][RTW89_IC][33] = 74, + [2][0][RTW89_KCC][33] = 70, + [2][0][RTW89_ACMA][33] = 46, + [2][0][RTW89_CHILE][33] = 54, + [2][0][RTW89_UKRAINE][33] = 46, + [2][0][RTW89_MEXICO][33] = 74, + [2][0][RTW89_CN][33] = 127, + [2][0][RTW89_QATAR][33] = 46, + [2][0][RTW89_FCC][35] = 74, + [2][0][RTW89_ETSI][35] = 46, + [2][0][RTW89_MKK][35] = 70, + [2][0][RTW89_IC][35] = 74, + [2][0][RTW89_KCC][35] = 70, + [2][0][RTW89_ACMA][35] = 46, + [2][0][RTW89_CHILE][35] = 54, + [2][0][RTW89_UKRAINE][35] = 46, + [2][0][RTW89_MEXICO][35] = 74, + [2][0][RTW89_CN][35] = 127, + [2][0][RTW89_QATAR][35] = 46, + [2][0][RTW89_FCC][37] = 74, + [2][0][RTW89_ETSI][37] = 127, + [2][0][RTW89_MKK][37] = 70, + [2][0][RTW89_IC][37] = 74, + [2][0][RTW89_KCC][37] = 70, + [2][0][RTW89_ACMA][37] = 74, + [2][0][RTW89_CHILE][37] = 54, + [2][0][RTW89_UKRAINE][37] = 127, + [2][0][RTW89_MEXICO][37] = 74, + [2][0][RTW89_CN][37] = 127, + [2][0][RTW89_QATAR][37] = 127, + [2][0][RTW89_FCC][38] = 76, + [2][0][RTW89_ETSI][38] = 28, + [2][0][RTW89_MKK][38] = 127, + [2][0][RTW89_IC][38] = 76, + [2][0][RTW89_KCC][38] = 70, + [2][0][RTW89_ACMA][38] = 76, + [2][0][RTW89_CHILE][38] = 54, + [2][0][RTW89_UKRAINE][38] = 28, + [2][0][RTW89_MEXICO][38] = 76, + [2][0][RTW89_CN][38] = 
76, + [2][0][RTW89_QATAR][38] = 28, + [2][0][RTW89_FCC][40] = 76, + [2][0][RTW89_ETSI][40] = 28, + [2][0][RTW89_MKK][40] = 127, + [2][0][RTW89_IC][40] = 76, + [2][0][RTW89_KCC][40] = 70, + [2][0][RTW89_ACMA][40] = 76, + [2][0][RTW89_CHILE][40] = 54, + [2][0][RTW89_UKRAINE][40] = 28, + [2][0][RTW89_MEXICO][40] = 76, + [2][0][RTW89_CN][40] = 76, + [2][0][RTW89_QATAR][40] = 28, + [2][0][RTW89_FCC][42] = 76, + [2][0][RTW89_ETSI][42] = 28, + [2][0][RTW89_MKK][42] = 127, + [2][0][RTW89_IC][42] = 76, + [2][0][RTW89_KCC][42] = 70, + [2][0][RTW89_ACMA][42] = 76, + [2][0][RTW89_CHILE][42] = 54, + [2][0][RTW89_UKRAINE][42] = 28, + [2][0][RTW89_MEXICO][42] = 76, + [2][0][RTW89_CN][42] = 76, + [2][0][RTW89_QATAR][42] = 28, + [2][0][RTW89_FCC][44] = 76, + [2][0][RTW89_ETSI][44] = 28, + [2][0][RTW89_MKK][44] = 127, + [2][0][RTW89_IC][44] = 76, + [2][0][RTW89_KCC][44] = 70, + [2][0][RTW89_ACMA][44] = 76, + [2][0][RTW89_CHILE][44] = 54, + [2][0][RTW89_UKRAINE][44] = 28, + [2][0][RTW89_MEXICO][44] = 76, + [2][0][RTW89_CN][44] = 76, + [2][0][RTW89_QATAR][44] = 28, + [2][0][RTW89_FCC][46] = 76, + [2][0][RTW89_ETSI][46] = 28, + [2][0][RTW89_MKK][46] = 127, + [2][0][RTW89_IC][46] = 76, + [2][0][RTW89_KCC][46] = 70, + [2][0][RTW89_ACMA][46] = 76, + [2][0][RTW89_CHILE][46] = 54, + [2][0][RTW89_UKRAINE][46] = 28, + [2][0][RTW89_MEXICO][46] = 76, + [2][0][RTW89_CN][46] = 76, + [2][0][RTW89_QATAR][46] = 28, + [2][1][RTW89_FCC][0] = 58, + [2][1][RTW89_ETSI][0] = 32, + [2][1][RTW89_MKK][0] = 38, + [2][1][RTW89_IC][0] = 30, + [2][1][RTW89_KCC][0] = 54, + [2][1][RTW89_ACMA][0] = 32, + [2][1][RTW89_CHILE][0] = 18, + [2][1][RTW89_UKRAINE][0] = 32, + [2][1][RTW89_MEXICO][0] = 50, + [2][1][RTW89_CN][0] = 32, + [2][1][RTW89_QATAR][0] = 32, + [2][1][RTW89_FCC][2] = 58, + [2][1][RTW89_ETSI][2] = 32, + [2][1][RTW89_MKK][2] = 38, + [2][1][RTW89_IC][2] = 30, + [2][1][RTW89_KCC][2] = 54, + [2][1][RTW89_ACMA][2] = 32, + [2][1][RTW89_CHILE][2] = 18, + [2][1][RTW89_UKRAINE][2] = 32, + [2][1][RTW89_MEXICO][2] = 50, + [2][1][RTW89_CN][2] = 32, + [2][1][RTW89_QATAR][2] = 32, + [2][1][RTW89_FCC][4] = 58, + [2][1][RTW89_ETSI][4] = 32, + [2][1][RTW89_MKK][4] = 38, + [2][1][RTW89_IC][4] = 30, + [2][1][RTW89_KCC][4] = 54, + [2][1][RTW89_ACMA][4] = 32, + [2][1][RTW89_CHILE][4] = 18, + [2][1][RTW89_UKRAINE][4] = 32, + [2][1][RTW89_MEXICO][4] = 50, + [2][1][RTW89_CN][4] = 32, + [2][1][RTW89_QATAR][4] = 32, + [2][1][RTW89_FCC][6] = 58, + [2][1][RTW89_ETSI][6] = 32, + [2][1][RTW89_MKK][6] = 38, + [2][1][RTW89_IC][6] = 30, + [2][1][RTW89_KCC][6] = 26, + [2][1][RTW89_ACMA][6] = 32, + [2][1][RTW89_CHILE][6] = 18, + [2][1][RTW89_UKRAINE][6] = 32, + [2][1][RTW89_MEXICO][6] = 50, + [2][1][RTW89_CN][6] = 32, + [2][1][RTW89_QATAR][6] = 32, + [2][1][RTW89_FCC][8] = 58, + [2][1][RTW89_ETSI][8] = 32, + [2][1][RTW89_MKK][8] = 38, + [2][1][RTW89_IC][8] = 52, + [2][1][RTW89_KCC][8] = 54, + [2][1][RTW89_ACMA][8] = 32, + [2][1][RTW89_CHILE][8] = 42, + [2][1][RTW89_UKRAINE][8] = 32, + [2][1][RTW89_MEXICO][8] = 58, + [2][1][RTW89_CN][8] = 32, + [2][1][RTW89_QATAR][8] = 32, + [2][1][RTW89_FCC][10] = 58, + [2][1][RTW89_ETSI][10] = 32, + [2][1][RTW89_MKK][10] = 38, + [2][1][RTW89_IC][10] = 52, + [2][1][RTW89_KCC][10] = 54, + [2][1][RTW89_ACMA][10] = 32, + [2][1][RTW89_CHILE][10] = 42, + [2][1][RTW89_UKRAINE][10] = 32, + [2][1][RTW89_MEXICO][10] = 58, + [2][1][RTW89_CN][10] = 32, + [2][1][RTW89_QATAR][10] = 32, + [2][1][RTW89_FCC][12] = 58, + [2][1][RTW89_ETSI][12] = 32, + [2][1][RTW89_MKK][12] = 38, + [2][1][RTW89_IC][12] = 52, + [2][1][RTW89_KCC][12] = 54, + 
[2][1][RTW89_ACMA][12] = 32, + [2][1][RTW89_CHILE][12] = 42, + [2][1][RTW89_UKRAINE][12] = 32, + [2][1][RTW89_MEXICO][12] = 58, + [2][1][RTW89_CN][12] = 32, + [2][1][RTW89_QATAR][12] = 32, + [2][1][RTW89_FCC][14] = 58, + [2][1][RTW89_ETSI][14] = 32, + [2][1][RTW89_MKK][14] = 38, + [2][1][RTW89_IC][14] = 52, + [2][1][RTW89_KCC][14] = 54, + [2][1][RTW89_ACMA][14] = 32, + [2][1][RTW89_CHILE][14] = 42, + [2][1][RTW89_UKRAINE][14] = 32, + [2][1][RTW89_MEXICO][14] = 58, + [2][1][RTW89_CN][14] = 32, + [2][1][RTW89_QATAR][14] = 32, + [2][1][RTW89_FCC][15] = 58, + [2][1][RTW89_ETSI][15] = 32, + [2][1][RTW89_MKK][15] = 58, + [2][1][RTW89_IC][15] = 58, + [2][1][RTW89_KCC][15] = 54, + [2][1][RTW89_ACMA][15] = 32, + [2][1][RTW89_CHILE][15] = 42, + [2][1][RTW89_UKRAINE][15] = 32, + [2][1][RTW89_MEXICO][15] = 58, + [2][1][RTW89_CN][15] = 127, + [2][1][RTW89_QATAR][15] = 32, + [2][1][RTW89_FCC][17] = 58, + [2][1][RTW89_ETSI][17] = 32, + [2][1][RTW89_MKK][17] = 58, + [2][1][RTW89_IC][17] = 58, + [2][1][RTW89_KCC][17] = 54, + [2][1][RTW89_ACMA][17] = 32, + [2][1][RTW89_CHILE][17] = 42, + [2][1][RTW89_UKRAINE][17] = 32, + [2][1][RTW89_MEXICO][17] = 58, + [2][1][RTW89_CN][17] = 127, + [2][1][RTW89_QATAR][17] = 32, + [2][1][RTW89_FCC][19] = 58, + [2][1][RTW89_ETSI][19] = 32, + [2][1][RTW89_MKK][19] = 58, + [2][1][RTW89_IC][19] = 58, + [2][1][RTW89_KCC][19] = 54, + [2][1][RTW89_ACMA][19] = 32, + [2][1][RTW89_CHILE][19] = 42, + [2][1][RTW89_UKRAINE][19] = 32, + [2][1][RTW89_MEXICO][19] = 58, + [2][1][RTW89_CN][19] = 127, + [2][1][RTW89_QATAR][19] = 32, + [2][1][RTW89_FCC][21] = 58, + [2][1][RTW89_ETSI][21] = 32, + [2][1][RTW89_MKK][21] = 58, + [2][1][RTW89_IC][21] = 58, + [2][1][RTW89_KCC][21] = 54, + [2][1][RTW89_ACMA][21] = 32, + [2][1][RTW89_CHILE][21] = 42, + [2][1][RTW89_UKRAINE][21] = 32, + [2][1][RTW89_MEXICO][21] = 58, + [2][1][RTW89_CN][21] = 127, + [2][1][RTW89_QATAR][21] = 32, + [2][1][RTW89_FCC][23] = 58, + [2][1][RTW89_ETSI][23] = 32, + [2][1][RTW89_MKK][23] = 58, + [2][1][RTW89_IC][23] = 58, + [2][1][RTW89_KCC][23] = 54, + [2][1][RTW89_ACMA][23] = 32, + [2][1][RTW89_CHILE][23] = 42, + [2][1][RTW89_UKRAINE][23] = 32, + [2][1][RTW89_MEXICO][23] = 58, + [2][1][RTW89_CN][23] = 127, + [2][1][RTW89_QATAR][23] = 32, + [2][1][RTW89_FCC][25] = 58, + [2][1][RTW89_ETSI][25] = 32, + [2][1][RTW89_MKK][25] = 58, + [2][1][RTW89_IC][25] = 127, + [2][1][RTW89_KCC][25] = 54, + [2][1][RTW89_ACMA][25] = 127, + [2][1][RTW89_CHILE][25] = 42, + [2][1][RTW89_UKRAINE][25] = 32, + [2][1][RTW89_MEXICO][25] = 58, + [2][1][RTW89_CN][25] = 127, + [2][1][RTW89_QATAR][25] = 32, + [2][1][RTW89_FCC][27] = 58, + [2][1][RTW89_ETSI][27] = 32, + [2][1][RTW89_MKK][27] = 58, + [2][1][RTW89_IC][27] = 127, + [2][1][RTW89_KCC][27] = 54, + [2][1][RTW89_ACMA][27] = 127, + [2][1][RTW89_CHILE][27] = 42, + [2][1][RTW89_UKRAINE][27] = 32, + [2][1][RTW89_MEXICO][27] = 58, + [2][1][RTW89_CN][27] = 127, + [2][1][RTW89_QATAR][27] = 32, + [2][1][RTW89_FCC][29] = 58, + [2][1][RTW89_ETSI][29] = 32, + [2][1][RTW89_MKK][29] = 58, + [2][1][RTW89_IC][29] = 127, + [2][1][RTW89_KCC][29] = 54, + [2][1][RTW89_ACMA][29] = 127, + [2][1][RTW89_CHILE][29] = 42, + [2][1][RTW89_UKRAINE][29] = 32, + [2][1][RTW89_MEXICO][29] = 58, + [2][1][RTW89_CN][29] = 127, + [2][1][RTW89_QATAR][29] = 32, + [2][1][RTW89_FCC][31] = 58, + [2][1][RTW89_ETSI][31] = 32, + [2][1][RTW89_MKK][31] = 58, + [2][1][RTW89_IC][31] = 58, + [2][1][RTW89_KCC][31] = 54, + [2][1][RTW89_ACMA][31] = 32, + [2][1][RTW89_CHILE][31] = 42, + [2][1][RTW89_UKRAINE][31] = 32, + [2][1][RTW89_MEXICO][31] = 58, + 
[2][1][RTW89_CN][31] = 127, + [2][1][RTW89_QATAR][31] = 32, + [2][1][RTW89_FCC][33] = 58, + [2][1][RTW89_ETSI][33] = 32, + [2][1][RTW89_MKK][33] = 58, + [2][1][RTW89_IC][33] = 58, + [2][1][RTW89_KCC][33] = 54, + [2][1][RTW89_ACMA][33] = 32, + [2][1][RTW89_CHILE][33] = 42, + [2][1][RTW89_UKRAINE][33] = 32, + [2][1][RTW89_MEXICO][33] = 58, + [2][1][RTW89_CN][33] = 127, + [2][1][RTW89_QATAR][33] = 32, + [2][1][RTW89_FCC][35] = 58, + [2][1][RTW89_ETSI][35] = 32, + [2][1][RTW89_MKK][35] = 58, + [2][1][RTW89_IC][35] = 58, + [2][1][RTW89_KCC][35] = 54, + [2][1][RTW89_ACMA][35] = 32, + [2][1][RTW89_CHILE][35] = 42, + [2][1][RTW89_UKRAINE][35] = 32, + [2][1][RTW89_MEXICO][35] = 58, + [2][1][RTW89_CN][35] = 127, + [2][1][RTW89_QATAR][35] = 32, + [2][1][RTW89_FCC][37] = 58, + [2][1][RTW89_ETSI][37] = 127, + [2][1][RTW89_MKK][37] = 58, + [2][1][RTW89_IC][37] = 58, + [2][1][RTW89_KCC][37] = 54, + [2][1][RTW89_ACMA][37] = 62, + [2][1][RTW89_CHILE][37] = 42, + [2][1][RTW89_UKRAINE][37] = 127, + [2][1][RTW89_MEXICO][37] = 58, + [2][1][RTW89_CN][37] = 127, + [2][1][RTW89_QATAR][37] = 127, + [2][1][RTW89_FCC][38] = 76, + [2][1][RTW89_ETSI][38] = 16, + [2][1][RTW89_MKK][38] = 127, + [2][1][RTW89_IC][38] = 76, + [2][1][RTW89_KCC][38] = 54, + [2][1][RTW89_ACMA][38] = 76, + [2][1][RTW89_CHILE][38] = 42, + [2][1][RTW89_UKRAINE][38] = 16, + [2][1][RTW89_MEXICO][38] = 76, + [2][1][RTW89_CN][38] = 64, + [2][1][RTW89_QATAR][38] = 16, + [2][1][RTW89_FCC][40] = 76, + [2][1][RTW89_ETSI][40] = 16, + [2][1][RTW89_MKK][40] = 127, + [2][1][RTW89_IC][40] = 76, + [2][1][RTW89_KCC][40] = 54, + [2][1][RTW89_ACMA][40] = 76, + [2][1][RTW89_CHILE][40] = 42, + [2][1][RTW89_UKRAINE][40] = 16, + [2][1][RTW89_MEXICO][40] = 76, + [2][1][RTW89_CN][40] = 64, + [2][1][RTW89_QATAR][40] = 16, + [2][1][RTW89_FCC][42] = 76, + [2][1][RTW89_ETSI][42] = 16, + [2][1][RTW89_MKK][42] = 127, + [2][1][RTW89_IC][42] = 76, + [2][1][RTW89_KCC][42] = 54, + [2][1][RTW89_ACMA][42] = 76, + [2][1][RTW89_CHILE][42] = 42, + [2][1][RTW89_UKRAINE][42] = 16, + [2][1][RTW89_MEXICO][42] = 76, + [2][1][RTW89_CN][42] = 64, + [2][1][RTW89_QATAR][42] = 16, + [2][1][RTW89_FCC][44] = 76, + [2][1][RTW89_ETSI][44] = 16, + [2][1][RTW89_MKK][44] = 127, + [2][1][RTW89_IC][44] = 76, + [2][1][RTW89_KCC][44] = 54, + [2][1][RTW89_ACMA][44] = 76, + [2][1][RTW89_CHILE][44] = 42, + [2][1][RTW89_UKRAINE][44] = 16, + [2][1][RTW89_MEXICO][44] = 76, + [2][1][RTW89_CN][44] = 64, + [2][1][RTW89_QATAR][44] = 16, + [2][1][RTW89_FCC][46] = 76, + [2][1][RTW89_ETSI][46] = 16, + [2][1][RTW89_MKK][46] = 127, + [2][1][RTW89_IC][46] = 76, + [2][1][RTW89_KCC][46] = 54, + [2][1][RTW89_ACMA][46] = 76, + [2][1][RTW89_CHILE][46] = 42, + [2][1][RTW89_UKRAINE][46] = 16, + [2][1][RTW89_MEXICO][46] = 76, + [2][1][RTW89_CN][46] = 64, + [2][1][RTW89_QATAR][46] = 16, }; #define DECLARE_DIG_TABLE(name) \ diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f1bf71e6c608..5d1490fc32db 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -23,6 +23,7 @@ #include "rsi_common.h" #include "rsi_coex.h" #include "rsi_hal.h" +#include "rsi_usb.h" u32 rsi_zone_enabled = /* INFO_ZONE | INIT_ZONE | @@ -168,6 +169,9 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len) frame_desc = &rx_pkt[index]; actual_length = *(u16 *)&frame_desc[0]; offset = *(u16 *)&frame_desc[2]; + if (!rcv_pkt_len && offset > + RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ) + goto fail; queueno = rsi_get_queueno(frame_desc, offset); 
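The rsi_read_pkt() hunk above is a hardening fix: `offset` comes straight from the device-supplied frame descriptor, and on the USB path (signalled by rcv_pkt_len == 0) it is now rejected before it can point past the fixed-size URB buffer. Below is a minimal sketch of the same validate-before-index pattern; the two size constants mirror the driver's, but struct rx_frame_desc and demo_parse_rx_frame() are hypothetical names used only for illustration.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

#define RSI_MAX_RX_USB_PKT_SIZE	3000	/* fixed URB buffer size */
#define FRAME_DESC_SZ		16	/* descriptor bytes preceding the payload */

/* Hypothetical view of the device-supplied descriptor header. */
struct rx_frame_desc {
	__le16 actual_length;
	__le16 offset;
};

/*
 * A malicious or buggy device controls `offset`; without the range
 * check the driver would go on to parse memory beyond the RX
 * allocation.
 */
static int demo_parse_rx_frame(const u8 *buf, bool from_usb)
{
	const struct rx_frame_desc *desc = (const void *)buf;
	u16 offset = le16_to_cpu(desc->offset);

	if (from_usb && offset > RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ)
		return -EINVAL;	/* descriptor points outside the buffer */

	/* safe: subsequent parsing based on `offset` stays in bounds */
	return 0;
}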
length = rsi_get_length(frame_desc, offset); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 6821ea991895..66fe386ec9cc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -269,8 +269,12 @@ static void rsi_rx_done_handler(struct urb *urb) struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data; int status = -EINVAL; + if (!rx_cb->rx_skb) + return; + if (urb->status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; return; } @@ -294,8 +298,10 @@ out: if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC)) rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__); - if (status) + if (status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; + } } static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) @@ -324,7 +330,6 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) struct sk_buff *skb; u8 dword_align_bytes = 0; -#define RSI_MAX_RX_USB_PKT_SIZE 3000 skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE); if (!skb) return -ENOMEM; diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 254d19b66412..961851748bc4 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -44,6 +44,8 @@ #define RSI_USB_BUF_SIZE 4096 #define RSI_USB_CTRL_BUF_SIZE 0x04 +#define RSI_MAX_RX_USB_PKT_SIZE 3000 + struct rx_usb_ctrl_block { u8 *data; struct urb *rx_urb; diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 9fd8cf2d270c..72fc41ac83c0 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -26,7 +26,7 @@ #include "wl12xx_80211.h" #include "io.h" -static bool dump = false; +static bool dump; struct wl12xx_sdio_glue { struct device *dev; diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index bdb2b0e46c12..609fd4a2c865 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -16,6 +16,17 @@ config WWAN if WWAN +config WWAN_DEBUGFS + bool "WWAN devices debugfs interface" if EXPERT + depends on DEBUG_FS + default y + help + Enables debugfs infrastructure for the WWAN core and device drivers. + + If this option is selected, then you can find the debug interface + elements for each WWAN device in a directory that is corresponding to + the device name: debugfs/wwan/wwanX. + config WWAN_HWSIM tristate "Simulated WWAN device" help @@ -85,6 +96,7 @@ config IOSM tristate "IOSM Driver for Intel M.2 WWAN Device" depends on INTEL_IOMMU select NET_DEVLINK + select RELAY if WWAN_DEBUGFS help This driver enables Intel M.2 WWAN Device communication. diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile index 5c2528beca2a..fa8d6afd18e1 100644 --- a/drivers/net/wwan/iosm/Makefile +++ b/drivers/net/wwan/iosm/Makefile @@ -21,7 +21,10 @@ iosm-y = \ iosm_ipc_mux_codec.o \ iosm_ipc_devlink.o \ iosm_ipc_flash.o \ - iosm_ipc_coredump.o \ + iosm_ipc_coredump.o + +iosm-$(CONFIG_WWAN_DEBUGFS) += \ + iosm_ipc_debugfs.o \ iosm_ipc_trace.o obj-$(CONFIG_IOSM) := iosm.o diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c new file mode 100644 index 000000000000..f2f57751a7d2 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. 
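The rsi_rx_done_handler() changes above apply the standard double-free defence for URB completion handlers: bail out early when no skb is attached, and clear the stored pointer every time the skb is freed or handed off. A condensed sketch of the pattern, with hypothetical demo_* names:

#include <linux/usb.h>
#include <linux/skbuff.h>

/* Hypothetical per-endpoint receive bookkeeping. */
struct demo_rx_cb {
	struct sk_buff *rx_skb;
};

static void demo_rx_done(struct urb *urb)
{
	struct demo_rx_cb *rx_cb = urb->context;

	/* Nothing attached: a racing teardown already consumed the skb. */
	if (!rx_cb->rx_skb)
		return;

	if (urb->status) {
		dev_kfree_skb(rx_cb->rx_skb);
		rx_cb->rx_skb = NULL;	/* never leave a dangling pointer */
		return;
	}

	/* ... hand rx_cb->rx_skb up the stack, then clear the slot ... */
}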
+ */ + +#include <linux/debugfs.h> +#include <linux/wwan.h> + +#include "iosm_ipc_imem.h" +#include "iosm_ipc_trace.h" +#include "iosm_ipc_debugfs.h" + +void ipc_debugfs_init(struct iosm_imem *ipc_imem) +{ + struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev); + + ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, + debugfs_pdev); + + ipc_imem->trace = ipc_trace_init(ipc_imem); + if (!ipc_imem->trace) + dev_warn(ipc_imem->dev, "trace channel init failed"); +} + +void ipc_debugfs_deinit(struct iosm_imem *ipc_imem) +{ + ipc_trace_deinit(ipc_imem->trace); + debugfs_remove_recursive(ipc_imem->debugfs_dir); +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.h b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h new file mode 100644 index 000000000000..8a84bfa2c14a --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#ifndef IOSM_IPC_DEBUGFS_H +#define IOSM_IPC_DEBUGFS_H + +#ifdef CONFIG_WWAN_DEBUGFS +void ipc_debugfs_init(struct iosm_imem *ipc_imem); +void ipc_debugfs_deinit(struct iosm_imem *ipc_imem); +#else +static inline void ipc_debugfs_init(struct iosm_imem *ipc_imem) {} +static inline void ipc_debugfs_deinit(struct iosm_imem *ipc_imem) {} +#endif + +#endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index 1be07114c85d..f9e8e0ee4de3 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -11,6 +11,7 @@ #include "iosm_ipc_imem.h" #include "iosm_ipc_port.h" #include "iosm_ipc_trace.h" +#include "iosm_ipc_debugfs.h" /* Check the wwan ips if it is valid with Channel as input. */ static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl) @@ -133,7 +134,6 @@ static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem, * for channel alloc function. */ cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID; - cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES; return 0; } @@ -182,9 +182,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer) bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem) { struct ipc_mem_channel *channel; + bool hpda_ctrl_pending = false; struct sk_buff_head *ul_list; bool hpda_pending = false; - bool forced_hpdu = false; struct ipc_pipe *pipe; int i; @@ -201,15 +201,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem) ul_list = &channel->ul_list; /* Fill the transfer descriptor with the uplink buffer info. 
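The new iosm_ipc_debugfs.h above follows the kernel's usual stub-header idiom: when CONFIG_WWAN_DEBUGFS is off, the entry points collapse to empty static inlines, so callers such as ipc_imem_cleanup() need no conditional compilation and the Makefile simply omits the object files. A generic sketch of the idiom (all names hypothetical):

/* foo_debugfs.h -- illustration of the stub-header idiom */
#ifndef FOO_DEBUGFS_H
#define FOO_DEBUGFS_H

struct foo_dev;

#ifdef CONFIG_FOO_DEBUGFS
/* Real implementations live in foo_debugfs.c, built only when enabled. */
void foo_debugfs_init(struct foo_dev *fd);
void foo_debugfs_deinit(struct foo_dev *fd);
#else
/*
 * Compiled out: the stubs vanish entirely after inlining, so call
 * sites stay free of #ifdef clutter.
 */
static inline void foo_debugfs_init(struct foo_dev *fd) {}
static inline void foo_debugfs_deinit(struct foo_dev *fd) {}
#endif

#endif /* FOO_DEBUGFS_H */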
*/ - hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, + if (!ipc_imem_check_wwan_ips(channel)) { + hpda_ctrl_pending |= + ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, pipe, ul_list); - - /* forced HP update needed for non data channels */ - if (hpda_pending && !ipc_imem_check_wwan_ips(channel)) - forced_hpdu = true; + } else { + hpda_pending |= + ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, + pipe, ul_list); + } } - if (forced_hpdu) { + /* forced HP update needed for non data channels */ + if (hpda_ctrl_pending) { hpda_pending = false; ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, IPC_HP_UL_WRITE_TD); @@ -272,8 +276,8 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem, if (port_id == IPC_MEM_CTRL_CHL_ID_7) ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb); - else if (port_id == ipc_imem->trace->chl_id) - ipc_trace_port_rx(ipc_imem->trace, skb); + else if (ipc_is_trace_channel(ipc_imem, port_id)) + ipc_trace_port_rx(ipc_imem, skb); else wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb); @@ -533,6 +537,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) return; } + if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) + ipc_devlink_deinit(ipc_imem->ipc_devlink); + if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg)) ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem); @@ -554,11 +561,7 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) ctrl_chl_idx++; } - ipc_imem->trace = ipc_imem_trace_channel_init(ipc_imem); - if (!ipc_imem->trace) { - dev_err(ipc_imem->dev, "trace channel init failed"); - return; - } + ipc_debugfs_init(ipc_imem); ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0, false); @@ -1175,12 +1178,12 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem) if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) { ipc_mux_deinit(ipc_imem->mux); - ipc_trace_deinit(ipc_imem->trace); + ipc_debugfs_deinit(ipc_imem); ipc_wwan_deinit(ipc_imem->wwan); ipc_port_deinit(ipc_imem->ipc_port); } - if (ipc_imem->ipc_devlink) + if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) ipc_devlink_deinit(ipc_imem->ipc_devlink); ipc_imem_device_ipc_uninit(ipc_imem); @@ -1276,7 +1279,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, ipc_imem->pci_device_id = device_id; - ipc_imem->ev_cdev_write_pending = false; ipc_imem->cp_version = 0; ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP; @@ -1344,6 +1346,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, if (ipc_flash_link_establish(ipc_imem)) goto devlink_channel_fail; + + set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag); } return ipc_imem; devlink_channel_fail: diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index cec38009c44a..98554e9beb01 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -101,6 +101,7 @@ struct ipc_chnl_cfg; #define IOSM_CHIP_INFO_SIZE_MAX 100 #define FULLY_FUNCTIONAL 0 +#define IOSM_DEVLINK_INIT 1 /* List of the supported UL/DL pipes. */ enum ipc_mem_pipes { @@ -336,11 +337,10 @@ enum ipc_phase { * process the irq actions. * @flag: Flag to monitor the state of driver * @td_update_timer_suspended: if true then td update timer suspend - * @ev_cdev_write_pending: 0 means inform the IPC tasklet to pass - * the accumulated uplink buffers to CP. 
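Two related hunks above replace the bare `if (ipc_imem->ipc_devlink)` pointer test with an IOSM_DEVLINK_INIT bit in ipc_imem->flag: ipc_imem_init() sets the bit only after devlink bring-up fully succeeds, and both teardown paths gate on test_and_clear_bit(), which atomically consumes the bit so the deinit can never run twice. A minimal sketch of that idempotent-teardown pattern (demo_* names are hypothetical):

#include <linux/bitops.h>

#define DEMO_DEVLINK_INIT	1

struct demo_devlink;
extern struct demo_devlink *demo_devlink_init(void *ctx);
extern void demo_devlink_deinit(struct demo_devlink *dl);

struct demo_ctx {
	unsigned long flag;
	struct demo_devlink *devlink;
};

static void demo_setup(struct demo_ctx *ctx)
{
	ctx->devlink = demo_devlink_init(ctx);
	if (ctx->devlink)
		set_bit(DEMO_DEVLINK_INIT, &ctx->flag);	/* record full success */
}

/*
 * Callable from more than one path (e.g. a state worker and the
 * final cleanup): the first caller atomically clears the bit and
 * performs the teardown; any later caller sees it cleared and skips.
 */
static void demo_teardown(struct demo_ctx *ctx)
{
	if (test_and_clear_bit(DEMO_DEVLINK_INIT, &ctx->flag))
		demo_devlink_deinit(ctx->devlink);
}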
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass * @reset_det_n: Reset detect flag * @pcie_wake_n: Pcie wake flag + * @debugfs_dir: Debug FS directory for driver-specific entries */ struct iosm_imem { struct iosm_mmio *mmio; @@ -350,7 +350,9 @@ struct iosm_imem { struct iosm_mux *mux; struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS]; struct iosm_pcie *pcie; +#ifdef CONFIG_WWAN_DEBUGFS struct iosm_trace *trace; +#endif struct device *dev; enum ipc_mem_device_ipc_state ipc_requested_state; struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS]; @@ -376,10 +378,12 @@ struct iosm_imem { u8 ev_irq_pending[IPC_IRQ_VECTORS]; unsigned long flag; u8 td_update_timer_suspended:1, - ev_cdev_write_pending:1, ev_mux_net_transmit_pending:1, reset_det_n:1, pcie_wake_n:1; +#ifdef CONFIG_WWAN_DEBUGFS + struct dentry *debugfs_dir; +#endif }; /** diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 43f1796a8984..57304a5adf68 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -11,7 +11,6 @@ #include "iosm_ipc_imem_ops.h" #include "iosm_ipc_port.h" #include "iosm_ipc_task_queue.h" -#include "iosm_ipc_trace.h" /* Open a packet data online channel between the network layer and CP. */ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id) @@ -42,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id, static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg, void *msg, size_t size) { - ipc_imem->ev_cdev_write_pending = false; ipc_imem_ul_send(ipc_imem); return 0; @@ -51,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg, /* Through tasklet to do sio write. */ static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem) { - if (ipc_imem->ev_cdev_write_pending) - return -1; - - ipc_imem->ev_cdev_write_pending = true; - return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0, NULL, 0, false); } @@ -108,23 +101,6 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, "failed to register the ipc_wwan interfaces"); } -/** - * ipc_imem_trace_channel_init - Initializes trace channel. - * @ipc_imem: Pointer to iosm_imem struct. 
- * - * Returns: Pointer to trace instance on success else NULL - */ -struct iosm_trace *ipc_imem_trace_channel_init(struct iosm_imem *ipc_imem) -{ - struct ipc_chnl_cfg chnl_cfg = { 0 }; - - ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3); - ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg, - IRQ_MOD_OFF); - - return ipc_trace_init(ipc_imem); -} - /* Map SKB to DMA for transfer */ static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem, struct sk_buff *skb) @@ -471,6 +447,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink) /* Release the pipe resources */ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe); ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe); + ipc_imem->nr_of_channels--; } void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink, diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index e36ee2782629..f8afb217d9e2 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -141,5 +141,5 @@ int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data, */ int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink, unsigned char *buf, int count); -struct iosm_trace *ipc_imem_trace_channel_init(struct iosm_imem *ipc_imem); + #endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c index 09f94c123531..f09e5e77a2a5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c @@ -192,7 +192,7 @@ void ipc_mmio_config(struct iosm_mmio *ipc_mmio) iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end); iowrite64(ipc_mmio->context_info_addr, - ipc_mmio->base + ipc_mmio->offset.context_info); + ipc_mmio->base + ipc_mmio->offset.context_info); } void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr, diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c index c1c77ce699da..8e66ffe92055 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c @@ -97,7 +97,7 @@ static bool ipc_mux_session_open(struct iosm_mux *ipc_mux, /* Search for a free session interface id. */ if_id = le32_to_cpu(session_open->if_id); - if (if_id < 0 || if_id >= ipc_mux->nr_sessions) { + if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "invalid interface id=%d", if_id); return false; } @@ -129,6 +129,7 @@ static bool ipc_mux_session_open(struct iosm_mux *ipc_mux, /* Save and return the assigned if id. */ session_open->if_id = cpu_to_le32(if_id); + ipc_mux->nr_sessions++; return true; } @@ -151,7 +152,7 @@ static void ipc_mux_session_close(struct iosm_mux *ipc_mux, /* Copy the session interface id. */ if_id = le32_to_cpu(msg->if_id); - if (if_id < 0 || if_id >= ipc_mux->nr_sessions) { + if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "invalid session id %d", if_id); return; } @@ -170,6 +171,7 @@ static void ipc_mux_session_close(struct iosm_mux *ipc_mux, ipc_mux->session[if_id].flow_ctl_mask = 0; ipc_mux_session_reset(ipc_mux, if_id); + ipc_mux->nr_sessions--; } static void ipc_mux_channel_close(struct iosm_mux *ipc_mux, @@ -178,7 +180,7 @@ static void ipc_mux_channel_close(struct iosm_mux *ipc_mux, int i; /* Free pending session UL packet. 
*/ - for (i = 0; i < ipc_mux->nr_sessions; i++) + for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) if (ipc_mux->session[i].wwan) ipc_mux_session_reset(ipc_mux, i); @@ -244,6 +246,11 @@ static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg) /* Release an IP session. */ ipc_mux->event = MUX_E_MUX_SESSION_CLOSE; ipc_mux_session_close(ipc_mux, &msg->session_close); + if (!ipc_mux->nr_sessions) { + ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE; + ipc_mux_channel_close(ipc_mux, + &msg->channel_close); + } ret = ipc_mux->channel_id; goto out; @@ -281,7 +288,6 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg, ipc_mux->protocol = mux_cfg->protocol; ipc_mux->ul_flow = mux_cfg->ul_flow; - ipc_mux->nr_sessions = mux_cfg->nr_sessions; ipc_mux->instance_id = mux_cfg->instance_id; ipc_mux->wwan_q_offset = 0; @@ -340,7 +346,7 @@ static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux) struct mux_session *session; int idx; - for (idx = 0; idx < ipc_mux->nr_sessions; idx++) { + for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) { session = &ipc_mux->session[idx]; if (!session->wwan) @@ -365,7 +371,7 @@ static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux) struct mux_session *session; int idx; - for (idx = 0; idx < ipc_mux->nr_sessions; idx++) { + for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) { session = &ipc_mux->session[idx]; if (!session->wwan) @@ -387,7 +393,7 @@ void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux) int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux) { - return ipc_mux ? ipc_mux->nr_sessions : -EFAULT; + return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT; } enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux) @@ -435,9 +441,11 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux) return; ipc_mux_stop_netif_for_all_sessions(ipc_mux); - channel_close = &mux_msg.channel_close; - channel_close->event = MUX_E_MUX_CHANNEL_CLOSE; - ipc_mux_schedule(ipc_mux, &mux_msg); + if (ipc_mux->state == MUX_S_ACTIVE) { + channel_close = &mux_msg.channel_close; + channel_close->event = MUX_E_MUX_CHANNEL_CLOSE; + ipc_mux_schedule(ipc_mux, &mux_msg); + } /* Empty the ADB free list. */ free_list = &ipc_mux->ul_adb.free_list; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h index ddd2cd0bd911..88debaa1ed31 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h @@ -278,7 +278,6 @@ struct iosm_mux { struct ipc_mux_config { enum ipc_mux_protocol protocol; enum ipc_mux_ul_flow ul_flow; - int nr_sessions; int instance_id; }; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index bdb2d32cdb6d..40fb54a0513e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -175,7 +175,7 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux, switch (le32_to_cpu(cmdh->command_type)) { case MUX_LITE_CMD_FLOW_CTL: - if (cmdh->if_id >= ipc_mux->nr_sessions) { + if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "if_id [%d] not valid", cmdh->if_id); return -EINVAL; /* No session interface id. */ @@ -307,13 +307,13 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux, } if_id = fct->if_id; - if (if_id >= ipc_mux->nr_sessions) { + if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "not supported if_id: %d", if_id); return; } /* Is the session active ? 
*/ - if_id = array_index_nospec(if_id, ipc_mux->nr_sessions); + if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES); wwan = ipc_mux->session[if_id].wwan; if (!wwan) { dev_err(ipc_mux->dev, "session Net ID is NULL"); @@ -355,13 +355,13 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux, } if_id = adgh->if_id; - if (if_id >= ipc_mux->nr_sessions) { + if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id); return; } /* Is the session active ? */ - if_id = array_index_nospec(if_id, ipc_mux->nr_sessions); + if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES); wwan = ipc_mux->session[if_id].wwan; if (!wwan) { dev_err(ipc_mux->dev, "session Net ID is NULL"); @@ -538,7 +538,7 @@ static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux) struct mux_session *session; int idx; - for (idx = 0; idx < ipc_mux->nr_sessions; idx++) { + for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) { session = &ipc_mux->session[idx]; if (!session->wwan) @@ -563,7 +563,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux) qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) + MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl); - for (i = 0; i < ipc_mux->nr_sessions; i++) { + for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) { session = &ipc_mux->session[i]; if (!session->wwan || session->flow_ctl_mask) @@ -777,13 +777,13 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux) ipc_mux->adb_prep_ongoing = true; - for (i = 0; i < ipc_mux->nr_sessions; i++) { + for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) { session_id = ipc_mux->rr_next_session; session = &ipc_mux->session[session_id]; /* Go to next handle rr_next_session overflow */ ipc_mux->rr_next_session++; - if (ipc_mux->rr_next_session >= ipc_mux->nr_sessions) + if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES) ipc_mux->rr_next_session = 0; if (!session->wwan || session->flow_ctl_mask || diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c index c5fa12599c2b..eeecfa3d10c5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_trace.c +++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c @@ -17,11 +17,13 @@ /** * ipc_trace_port_rx - Receive trace packet from cp and write to relay buffer - * @ipc_trace: Pointer to the ipc trace data-struct + * @ipc_imem: Pointer to iosm_imem structure * @skb: Pointer to struct sk_buff */ -void ipc_trace_port_rx(struct iosm_trace *ipc_trace, struct sk_buff *skb) +void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb) { + struct iosm_trace *ipc_trace = ipc_imem->trace; + if (ipc_trace->ipc_rchan) relay_write(ipc_trace->ipc_rchan, skb->data, skb->len); @@ -132,9 +134,14 @@ static const struct file_operations ipc_trace_fops = { */ struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem) { - struct iosm_trace *ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL); - struct dentry *debugfs_pdev; + struct ipc_chnl_cfg chnl_cfg = { 0 }; + struct iosm_trace *ipc_trace; + + ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3); + ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg, + IRQ_MOD_OFF); + ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL); if (!ipc_trace) return NULL; @@ -144,15 +151,14 @@ struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem) ipc_trace->chl_id = IPC_MEM_CTRL_CHL_ID_3; mutex_init(&ipc_trace->trc_mutex); - debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev); ipc_trace->ctrl_file = 
debugfs_create_file(IOSM_TRC_DEBUGFS_TRACE_CTRL, IOSM_TRC_FILE_PERM, - debugfs_pdev, + ipc_imem->debugfs_dir, ipc_trace, &ipc_trace_fops); ipc_trace->ipc_rchan = relay_open(IOSM_TRC_DEBUGFS_TRACE, - debugfs_pdev, + ipc_imem->debugfs_dir, IOSM_TRC_SUB_BUFF_SIZE, IOSM_TRC_N_SUB_BUFF, &relay_callbacks, NULL); @@ -166,6 +172,9 @@ struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem) */ void ipc_trace_deinit(struct iosm_trace *ipc_trace) { + if (!ipc_trace) + return; + debugfs_remove(ipc_trace->ctrl_file); relay_close(ipc_trace->ipc_rchan); mutex_destroy(&ipc_trace->trc_mutex); diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.h b/drivers/net/wwan/iosm/iosm_ipc_trace.h index 53346183af9c..5ebe7790585c 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_trace.h +++ b/drivers/net/wwan/iosm/iosm_ipc_trace.h @@ -45,7 +45,30 @@ struct iosm_trace { enum trace_ctrl_mode mode; }; +#ifdef CONFIG_WWAN_DEBUGFS + +static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id) +{ + return ipc_mem->trace && ipc_mem->trace->chl_id == chl_id; +} + struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem); void ipc_trace_deinit(struct iosm_trace *ipc_trace); -void ipc_trace_port_rx(struct iosm_trace *ipc_trace, struct sk_buff *skb); +void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb); + +#else + +static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id) +{ + return false; +} + +static inline void ipc_trace_port_rx(struct iosm_imem *ipc_imem, + struct sk_buff *skb) +{ + dev_kfree_skb(skb); +} + +#endif + #endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index b571d9cedba4..27151148c782 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -8,6 +8,7 @@ #include <linux/if_link.h> #include <linux/rtnetlink.h> #include <linux/wwan.h> +#include <net/pkt_sched.h> #include "iosm_ipc_chnl_cfg.h" #include "iosm_ipc_imem_ops.h" @@ -159,7 +160,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev) { iosm_dev->header_ops = NULL; iosm_dev->hard_header_len = 0; - iosm_dev->priv_flags |= IFF_NO_QUEUE; + iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; iosm_dev->type = ARPHRD_NONE; iosm_dev->mtu = ETH_DATA_LEN; diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.h b/drivers/net/wwan/iosm/iosm_ipc_wwan.h index 4925f22dff0a..a23e926398ff 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.h +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.h @@ -42,14 +42,4 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg, * */ void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int id, bool on); - -/** - * ipc_wwan_is_tx_stopped - Checks if Tx stopped for a Interface id. 
- * @ipc_wwan: Pointer to wwan instance - * @id: Ipc mux channel session id - * - * Return: true if stopped, false otherwise - */ -bool ipc_wwan_is_tx_stopped(struct iosm_wwan *ipc_wwan, int id); - #endif diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 5bf62dc35ac7..1508dc2a497b 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -50,7 +50,9 @@ struct wwan_device { atomic_t port_id; const struct wwan_ops *ops; void *ops_ctxt; +#ifdef CONFIG_WWAN_DEBUGFS struct dentry *debugfs_dir; +#endif }; /** @@ -146,6 +148,7 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name) return to_wwan_dev(dev); } +#ifdef CONFIG_WWAN_DEBUGFS struct dentry *wwan_get_debugfs_dir(struct device *parent) { struct wwan_device *wwandev; @@ -157,6 +160,7 @@ struct dentry *wwan_get_debugfs_dir(struct device *parent) return wwandev->debugfs_dir; } EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir); +#endif /* This function allocates and registers a new WWAN device OR if a WWAN device * already exist for the given parent, it gets a reference and return it. @@ -166,7 +170,6 @@ EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir); static struct wwan_device *wwan_create_dev(struct device *parent) { struct wwan_device *wwandev; - const char *wwandev_name; int err, id; /* The 'find-alloc-register' operation must be protected against @@ -206,9 +209,11 @@ static struct wwan_device *wwan_create_dev(struct device *parent) goto done_unlock; } - wwandev_name = kobject_name(&wwandev->dev.kobj); - wwandev->debugfs_dir = debugfs_create_dir(wwandev_name, - wwan_debugfs_dir); +#ifdef CONFIG_WWAN_DEBUGFS + wwandev->debugfs_dir = + debugfs_create_dir(kobject_name(&wwandev->dev.kobj), + wwan_debugfs_dir); +#endif done_unlock: mutex_unlock(&wwan_register_lock); @@ -240,7 +245,9 @@ static void wwan_remove_dev(struct wwan_device *wwandev) ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child); if (!ret) { +#ifdef CONFIG_WWAN_DEBUGFS debugfs_remove_recursive(wwandev->debugfs_dir); +#endif device_unregister(&wwandev->dev); } else { put_device(&wwandev->dev); @@ -1140,7 +1147,9 @@ static int __init wwan_init(void) goto destroy; } +#ifdef CONFIG_WWAN_DEBUGFS wwan_debugfs_dir = debugfs_create_dir("wwan", NULL); +#endif return 0; diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c24dab383654..722dacdd5a17 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> +#include <linux/module.h> #include "pcie-designware.h" diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 7b17da2f9b3f..cfe66bf04c1d 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -18,6 +18,7 @@ #include <linux/pm_domain.h> #include <linux/regmap.h> #include <linux/reset.h> +#include <linux/module.h> #include "pcie-designware.h" diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index b7e50ed050a8..841c44cd64c2 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -76,7 +76,7 @@ #define AMD_CPU_ID_CZN AMD_CPU_ID_RN #define AMD_CPU_ID_YC 0x14B5 -#define PMC_MSG_DELAY_MIN_US 100 +#define PMC_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 #define SOC_SUBSYSTEM_IP_MAX 12 diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c 
index 08598942a6d7..13f8cf70b9ae 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), }, }, + { + .ident = "Microsoft Surface Go 3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), + }, + }, { } }; diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index ae9293024c77..a91847a551a7 100644 --- a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -657,6 +657,18 @@ static int acpi_add(struct acpi_device *device) if (product && strlen(product) > 4) switch (product[4]) { case '5': + if (strlen(product) > 5) + switch (product[5]) { + case 'N': + year = 2021; + break; + case '0': + year = 2016; + break; + default: + year = 2022; + } + break; case '6': year = 2016; break; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index b3ac9c3f3b7c..bb1abb947e1e 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3015,6 +3015,8 @@ static struct attribute *hotkey_attributes[] = { &dev_attr_hotkey_all_mask.attr, &dev_attr_hotkey_adaptive_all_mask.attr, &dev_attr_hotkey_recommended_mask.attr, + &dev_attr_hotkey_tablet_mode.attr, + &dev_attr_hotkey_radio_sw.attr, #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL &dev_attr_hotkey_source_mask.attr, &dev_attr_hotkey_poll_freq.attr, @@ -5726,11 +5728,11 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { "tpacpi::standby", "tpacpi::dock_status1", "tpacpi::dock_status2", - "tpacpi::unknown_led2", + "tpacpi::lid_logo_dot", "tpacpi::unknown_led3", "tpacpi::thinkvantage", }; -#define TPACPI_SAFE_LEDS 0x1081U +#define TPACPI_SAFE_LEDS 0x1481U static inline bool tpacpi_is_led_restricted(const unsigned int led) { diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index fa8812039b82..17dd54d4b783 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -905,6 +905,16 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = { .properties = trekstor_primetab_t13b_props, }; +static const struct property_entry trekstor_surftab_duo_w1_props[] = { + PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), + { } +}; + +static const struct ts_dmi_data trekstor_surftab_duo_w1_data = { + .acpi_name = "GDIX1001:00", + .properties = trekstor_surftab_duo_w1_props, +}; + static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 20), PROPERTY_ENTRY_U32("touchscreen-min-y", 0), @@ -1503,6 +1513,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* TrekStor SurfTab duo W1 10.1 ST10432-10b */ + .driver_data = (void *)&trekstor_surftab_duo_w1_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"), + }, + }, + { /* TrekStor SurfTab twin 10.1 ST10432-8 */ .driver_data = (void *)&trekstor_surftab_twin_10_1_data, .matches = { diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c index b9fac786246a..2a5c1829aab7 100644 --- a/drivers/powercap/dtpm.c +++ b/drivers/powercap/dtpm.c @@ -463,17 +463,12 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent) static int __init init_dtpm(void) { - struct dtpm_descr *dtpm_descr; - pct = powercap_register_control_type(NULL, 
"dtpm", NULL); if (IS_ERR(pct)) { pr_err("Failed to register control type\n"); return PTR_ERR(pct); } - for_each_dtpm_table(dtpm_descr) - dtpm_descr->init(); - return 0; } late_initcall(init_dtpm); diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c index 6c7c2843ba0b..61f47fb9d997 100644 --- a/drivers/ptp/ptp_ines.c +++ b/drivers/ptp/ptp_ines.c @@ -338,10 +338,6 @@ static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - switch (cfg.tx_type) { case HWTSTAMP_TX_OFF: ts_stat_tx = 0; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 20dca4c0384a..de25d7ac41da 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -1030,8 +1030,6 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, data, QETH_PROT_IPV6); } -int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb); - extern const struct qeth_discipline qeth_l2_discipline; extern const struct qeth_discipline qeth_l3_discipline; extern const struct ethtool_ops qeth_ethtool_ops; @@ -1099,6 +1097,8 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count); u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, u8 cast_type, struct net_device *sb_dev); +u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); int qeth_open(struct net_device *dev); int qeth_stop(struct net_device *dev); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d32aa8b705db..c296e0556f49 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3635,12 +3635,10 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || !atomic_read(&queue->set_pci_flags_count)) { unsigned int index, flush_cnt; - bool q_was_packing; spin_lock(&queue->lock); index = queue->next_buf_to_fill; - q_was_packing = queue->do_pack; flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) @@ -3648,8 +3646,7 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) if (flush_cnt) { qeth_flush_buffers(queue, index, flush_cnt); - if (q_was_packing) - QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); + QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); } spin_unlock(&queue->lock); @@ -3769,7 +3766,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, /* * Note: Function assumes that we have 4 outbound queues. */ -int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) +static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) { struct vlan_ethhdr *veth = vlan_eth_hdr(skb); u8 tos; @@ -3814,7 +3811,6 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) } return card->qdio.default_out_queue; } -EXPORT_SYMBOL_GPL(qeth_get_priority_queue); /** * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 
@@ -5575,29 +5571,9 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, #endif static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, - struct qeth_hdr *hdr, bool uses_frags) + bool uses_frags, bool is_cso) { struct napi_struct *napi = &card->napi; - bool is_cso; - - switch (hdr->hdr.l2.id) { -#if IS_ENABLED(CONFIG_QETH_L3) - case QETH_HEADER_TYPE_LAYER3: - qeth_l3_rebuild_skb(card, skb, hdr); - is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; - break; -#endif - case QETH_HEADER_TYPE_LAYER2: - is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; - break; - default: - /* never happens */ - if (uses_frags) - napi_free_frags(napi); - else - kfree_skb(skb); - return; - } if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -5654,6 +5630,7 @@ static int qeth_extract_skb(struct qeth_card *card, struct qeth_hdr *hdr; struct sk_buff *skb; int skb_len = 0; + bool is_cso; element = &buffer->element[*element_no]; @@ -5673,11 +5650,15 @@ next_packet: switch (hdr->hdr.l2.id) { case QETH_HEADER_TYPE_LAYER2: skb_len = hdr->hdr.l2.pkt_length; + is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; + linear_len = ETH_HLEN; headroom = 0; break; case QETH_HEADER_TYPE_LAYER3: skb_len = hdr->hdr.l3.length; + is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; + if (!IS_LAYER3(card)) { QETH_CARD_STAT_INC(card, rx_dropped_notsupp); goto walk_packet; @@ -5804,7 +5785,12 @@ walk_packet: *element_no = element - &buffer->element[0]; *__offset = offset; - qeth_receive_skb(card, skb, hdr, uses_frags); +#if IS_ENABLED(CONFIG_QETH_L3) + if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3) + qeth_l3_rebuild_skb(card, skb, hdr); +#endif + + qeth_receive_skb(card, skb, uses_frags, is_cso); return 0; } @@ -7088,6 +7074,18 @@ u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); +u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct qeth_card *card = dev->ml_priv; + + if (qeth_uses_tx_prio_queueing(card)) + return qeth_get_priority_queue(card, skb); + + return netdev_pick_tx(dev, skb, sb_dev); +} +EXPORT_SYMBOL_GPL(qeth_osa_select_queue); + int qeth_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 0347fc184786..303461d70af3 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -519,19 +519,11 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) +static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) { - struct qeth_card *card = dev->ml_priv; - - if (IS_IQD(card)) - return qeth_iqd_select_queue(dev, skb, - qeth_get_ether_cast_type(skb), - sb_dev); - if (qeth_uses_tx_prio_queueing(card)) - return qeth_get_priority_queue(card, skb); - - return netdev_pick_tx(dev, skb, sb_dev); + return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb), + sb_dev); } static void qeth_l2_set_rx_mode(struct net_device *dev) @@ -726,7 +718,8 @@ struct qeth_l2_br2dev_event_work { unsigned char addr[ETH_ALEN]; }; -static const struct net_device_ops qeth_l2_netdev_ops; +static const struct net_device_ops qeth_l2_iqd_netdev_ops; +static const struct net_device_ops 
qeth_l2_osa_netdev_ops; static bool qeth_l2_must_learn(struct net_device *netdev, struct net_device *dstdev) @@ -738,7 +731,8 @@ static bool qeth_l2_must_learn(struct net_device *netdev, (priv->brport_features & BR_LEARNING_SYNC) && !(br_port_flag_is_set(netdev, BR_ISOLATED) && br_port_flag_is_set(dstdev, BR_ISOLATED)) && - netdev->netdev_ops == &qeth_l2_netdev_ops); + (netdev->netdev_ops == &qeth_l2_iqd_netdev_ops || + netdev->netdev_ops == &qeth_l2_osa_netdev_ops)); } /** @@ -1051,20 +1045,20 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, return rc; } -static const struct net_device_ops qeth_l2_netdev_ops = { +static const struct net_device_ops qeth_l2_iqd_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l2_hard_start_xmit, .ndo_features_check = qeth_features_check, - .ndo_select_queue = qeth_l2_select_queue, + .ndo_select_queue = qeth_l2_iqd_select_queue, .ndo_validate_addr = qeth_l2_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_eth_ioctl = qeth_do_ioctl, .ndo_siocdevprivate = qeth_siocdevprivate, - .ndo_set_mac_address = qeth_l2_set_mac_address, + .ndo_set_mac_address = qeth_l2_set_mac_address, .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, + .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, .ndo_tx_timeout = qeth_tx_timeout, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, @@ -1072,10 +1066,30 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_bridge_setlink = qeth_l2_bridge_setlink, }; +static const struct net_device_ops qeth_l2_osa_netdev_ops = { + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, + .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_features_check = qeth_features_check, + .ndo_select_queue = qeth_osa_select_queue, + .ndo_validate_addr = qeth_l2_validate_addr, + .ndo_set_rx_mode = qeth_l2_set_rx_mode, + .ndo_eth_ioctl = qeth_do_ioctl, + .ndo_siocdevprivate = qeth_siocdevprivate, + .ndo_set_mac_address = qeth_l2_set_mac_address, + .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, + .ndo_tx_timeout = qeth_tx_timeout, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, +}; + static int qeth_l2_setup_netdev(struct qeth_card *card) { + card->dev->netdev_ops = IS_IQD(card) ? 
&qeth_l2_iqd_netdev_ops : + &qeth_l2_osa_netdev_ops; card->dev->needed_headroom = sizeof(struct qeth_hdr); - card->dev->netdev_ops = &qeth_l2_netdev_ops; card->dev->priv_flags |= IFF_UNICAST_FLT; if (IS_OSM(card)) { diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 48a886f7af62..9251ad276ee8 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1822,17 +1822,6 @@ static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, qeth_l3_get_cast_type(skb, proto), sb_dev); } -static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) -{ - struct qeth_card *card = dev->ml_priv; - - if (qeth_uses_tx_prio_queueing(card)) - return qeth_get_priority_queue(card, skb); - - return netdev_pick_tx(dev, skb, sb_dev); -} - static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, @@ -1854,7 +1843,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_features_check = qeth_l3_osa_features_check, - .ndo_select_queue = qeth_l3_osa_select_queue, + .ndo_select_queue = qeth_osa_select_queue, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_eth_ioctl = qeth_do_ioctl, diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b940e0268f96..e83453bea2ae 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -5095,14 +5095,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NPort Recovery mode or node is just allocated */ if (!lpfc_nlp_not_used(ndlp)) { /* A LOGO is completing and the node is in NPR state. - * If this a fabric node that cleared its transport - * registration, release the rpi. + * Just unregister the RPI because the node is still + * required. 
*/ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); lpfc_unreg_rpi(vport, ndlp); } else { /* Indicate the node has already released, should diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 27eb652b564f..81dab9b82f79 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -639,8 +639,8 @@ static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc) mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP; current_time = ktime_get_real(); TimeStamp = ktime_to_ms(current_time); - mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF); - mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32); + mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); init_completion(&ioc->scsih_cmds.done); ioc->put_smid_default(ioc, smid); dinitprintk(ioc, ioc_info(ioc, diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index db6a759de1e9..a0af986633d2 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -142,6 +142,8 @@ #define MPT_MAX_CALLBACKS 32 +#define MPT_MAX_HBA_NUM_PHYS 32 + #define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ /* reserved for issuing internally framed scsi io cmds */ #define INTERNAL_SCSIIO_CMDS_COUNT 3 @@ -798,6 +800,7 @@ struct _sas_phy { * @enclosure_handle: handle for this a member of an enclosure * @device_info: bitwise defining capabilities of this sas_host/expander * @responding: used in _scsih_expander_device_mark_responding + * @nr_phys_allocated: number of phys for which memory is allocated * @phy: a list of phys that make up this sas_host/expander * @sas_port_list: list of ports attached to this sas_host/expander * @port: hba port entry containing node's port number info @@ -813,6 +816,7 @@ struct _sas_node { u16 enclosure_handle; u64 enclosure_logical_id; u8 responding; + u8 nr_phys_allocated; struct hba_port *port; struct _sas_phy *phy; struct list_head sas_port_list; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index cee7170beae8..00792767c620 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3869,7 +3869,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, shost_for_each_device(sdev, ioc->shost) { sas_device_priv_data = sdev->hostdata; - if (!sas_device_priv_data) + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; if (sas_device_priv_data->sas_target->sas_address != sas_address) @@ -6406,11 +6406,26 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) int i, j, count = 0, lcount = 0; int ret; u64 sas_addr; + u8 num_phys; drsprintk(ioc, ioc_info(ioc, "updating ports for sas_host(0x%016llx)\n", (unsigned long long)ioc->sas_hba.sas_address)); + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + port_table = kcalloc(ioc->sas_hba.num_phys, sizeof(struct hba_port), GFP_KERNEL); if (!port_table)
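The port-refresh hunk above rejects any firmware-reported phy count that exceeds nr_phys_allocated, and the host-refresh and host-add hunks that follow keep that bound valid. The underlying pattern: size the phy array for the controller maximum once, then let refresh paths adopt any smaller firmware-reported count, so a firmware upgrade or downgrade cannot overflow the allocation. A sketch under those assumptions, with illustrative example_* names rather than mpt3sas types:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Illustrative example_* types only, not mpt3sas code. */
struct example_phy {
	u8 phy_id;
};

struct example_hba {
	u8 nr_phys_allocated;	/* size of @phy */
	u8 num_phys;		/* current firmware-reported count */
	struct example_phy *phy;
};

#define EXAMPLE_MAX_PHYS 32	/* controller maximum, like MPT_MAX_HBA_NUM_PHYS */

static int example_host_add(struct example_hba *hba, u8 fw_num_phys)
{
	/* Allocate for the maximum once so later refreshes never overflow. */
	hba->nr_phys_allocated = max_t(u8, EXAMPLE_MAX_PHYS, fw_num_phys);
	hba->phy = kcalloc(hba->nr_phys_allocated, sizeof(*hba->phy),
			   GFP_KERNEL);
	if (!hba->phy)
		return -ENOMEM;

	hba->num_phys = fw_num_phys;
	return 0;
}

static int example_host_refresh(struct example_hba *hba, u8 fw_num_phys)
{
	/* Reject counts the allocation cannot hold, as the patch does. */
	if (!fw_num_phys || fw_num_phys > hba->nr_phys_allocated)
		return -EINVAL;

	hba->num_phys = fw_num_phys;
	return 0;
}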
@@ -6611,6 +6626,30 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) ioc->sas_hba.phy[i].hba_vphy = 1; } + /* + * Add new HBA phys to the STL if new phys were added as part + * of an HBA firmware upgrade/downgrade operation. + */ + if (!ioc->sas_hba.phy[i].phy) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], phy_pg0, + ioc->sas_hba.parent_dev); + continue; + } ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. AttachedDevHandle); @@ -6622,6 +6661,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) attached_handle, i, link_rate, ioc->sas_hba.phy[i].port); } + /* + * Clear the phy details if this phy was disabled as part of + * an HBA firmware upgrade/downgrade operation. + */ + for (i = ioc->sas_hba.num_phys; + i < ioc->sas_hba.nr_phys_allocated; i++) { + if (ioc->sas_hba.phy[i].phy && + ioc->sas_hba.phy[i].phy->negotiated_linkrate >= + SAS_LINK_RATE_1_5_GBPS) + mpt3sas_transport_update_links(ioc, + ioc->sas_hba.sas_address, 0, i, + MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); + } out: kfree(sas_iounit_pg0); } @@ -6654,7 +6706,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) __FILE__, __LINE__, __func__); return; } - ioc->sas_hba.phy = kcalloc(num_phys, + + ioc->sas_hba.nr_phys_allocated = max_t(u8, + MPT_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, sizeof(struct _sas_phy), GFP_KERNEL); if (!ioc->sas_hba.phy) { ioc_err(ioc, "failure at %s:%d/%s()!\n", diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index 2e37b189cb75..53d2b8562027 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -865,7 +865,7 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) "APP request entry - portid=%06x.\n", tdid.b24); /* Ran out of space */ - if (pcnt > app_req.num_ports) + if (pcnt >= app_req.num_ports) break; if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1d0278da9041..3c0da3770edf 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1189,7 +1189,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, __func__, off_dst, scsi_bufflen(scp), act_len, scsi_get_resid(scp)); n = scsi_bufflen(scp) - (off_dst + act_len); - scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n)); + scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n)); return 0; } @@ -1562,7 +1562,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) unsigned char pq_pdt; unsigned char *arr; unsigned char *cmd = scp->cmnd; - int alloc_len, n, ret; + u32 alloc_len, n; + int ret; bool have_wlun, is_disk, is_zbc, is_disk_zbc; alloc_len = get_unaligned_be16(cmd + 3); @@ -1585,7 +1586,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) kfree(arr); return check_condition_result; } else if (0x1 & cmd[1]) { /* EVPD bit set */ - int lu_id_num, port_group_id, target_dev_id, len; + int lu_id_num, port_group_id, target_dev_id; + u32 len; char lu_id_str[6]; int host_no = devip->sdbg_host->shost->host_no; @@ -1676,9 +1678,9 @@ static int
resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) kfree(arr); return check_condition_result; } - len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); ret = fill_from_dev_buffer(scp, arr, - min(len, SDEBUG_MAX_INQ_ARR_SZ)); + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); kfree(arr); return ret; } @@ -1714,7 +1716,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ ret = fill_from_dev_buffer(scp, arr, - min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ)); + min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ)); kfree(arr); return ret; } @@ -1729,8 +1731,8 @@ static int resp_requests(struct scsi_cmnd *scp, unsigned char *cmd = scp->cmnd; unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */ bool dsense = !!(cmd[1] & 1); - int alloc_len = cmd[4]; - int len = 18; + u32 alloc_len = cmd[4]; + u32 len = 18; int stopped_state = atomic_read(&devip->stopped); memset(arr, 0, sizeof(arr)); @@ -1774,7 +1776,7 @@ static int resp_requests(struct scsi_cmnd *scp, arr[7] = 0xa; } } - return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len)); + return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len)); } static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) @@ -2312,7 +2314,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp, { int pcontrol, pcode, subpcode, bd_len; unsigned char dev_spec; - int alloc_len, offset, len, target_dev_id; + u32 alloc_len, offset, len; + int target_dev_id; int target = scp->device->id; unsigned char *ap; unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; @@ -2468,7 +2471,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp, arr[0] = offset - 1; else put_unaligned_be16((offset - 2), arr + 0); - return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset)); + return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset)); } #define SDEBUG_MAX_MSELECT_SZ 512 @@ -2499,11 +2502,11 @@ static int resp_mode_select(struct scsi_cmnd *scp, __func__, param_len, res); md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); - if (md_len > 2) { + off = bd_len + (mselect6 ? 4 : 8); + if (md_len > 2 || off >= res) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; } - off = bd_len + (mselect6 ? 
4 : 8); mpage = arr[off] & 0x3f; ps = !!(arr[off] & 0x80); if (ps) { @@ -2583,7 +2586,8 @@ static int resp_ie_l_pg(unsigned char *arr) static int resp_log_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { - int ppc, sp, pcode, subpcode, alloc_len, len, n; + int ppc, sp, pcode, subpcode; + u32 alloc_len, len, n; unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; unsigned char *cmd = scp->cmnd; @@ -2653,9 +2657,9 @@ static int resp_log_sense(struct scsi_cmnd *scp, mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } - len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len); + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); return fill_from_dev_buffer(scp, arr, - min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ)); + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); } static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) @@ -4430,7 +4434,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, put_unaligned_be64(sdebug_capacity - 1, arr + 8); rep_len = (unsigned long)desc - (unsigned long)arr; - ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len)); + ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len)); fini: read_unlock(macc_lckp); @@ -4653,6 +4657,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, struct sdeb_zone_state *zsp) { enum sdebug_z_cond zc; + struct sdeb_store_info *sip = devip2sip(devip, false); if (zbc_zone_is_conv(zsp)) return; @@ -4664,6 +4669,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, if (zsp->z_cond == ZC4_CLOSED) devip->nr_closed--; + if (zsp->z_wp > zsp->z_start) + memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, + (zsp->z_wp - zsp->z_start) * sdebug_sector_size); + zsp->z_non_seq_resource = false; zsp->z_wp = zsp->z_start; zsp->z_cond = ZC1_EMPTY; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 7afcec250f9b..d4edce930a4a 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -812,7 +812,7 @@ store_state_field(struct device *dev, struct device_attribute *attr, mutex_lock(&sdev->state_mutex); if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { - ret = count; + ret = 0; } else { ret = scsi_device_set_state(sdev, state); if (ret == 0 && state == SDEV_RUNNING) diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index fc5b214347b3..5393b5c9dd9c 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -1189,6 +1189,7 @@ static int ufs_mtk_probe(struct platform_device *pdev) } link = device_link_add(dev, &reset_pdev->dev, DL_FLAG_AUTOPROBE_CONSUMER); + put_device(&reset_pdev->dev); if (!link) { dev_notice(dev, "add reset device_link fail\n"); goto skip_reset; diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 51424557810d..f725248ba57f 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -421,6 +421,13 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba) return err; } +static int ufs_intel_adl_init(struct ufs_hba *hba) +{ + hba->nop_out_timeout = 200; + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + return ufs_intel_common_init(hba); +} + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { .name = "intel-pci", .init = ufs_intel_common_init, @@ -449,6 +456,15 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = { .device_reset = ufs_intel_device_reset, }; +static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_adl_init, + .exit = 
ufs_intel_common_exit, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +}; + #ifdef CONFIG_PM_SLEEP static int ufshcd_pci_restore(struct device *dev) { @@ -563,6 +579,8 @@ static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops }, + { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 2e31e1413826..ded5ba9b1466 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -331,7 +331,7 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, cdb[0] = UFSHPB_READ; if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ) - ppn_tmp = swab64(ppn); + ppn_tmp = (__force __be64)swab64((__force u64)ppn); /* ppn value is stored as big-endian in the host memory */ memcpy(&cdb[6], &ppn_tmp, sizeof(__be64)); diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 19f7d7b90625..28e1d98ae102 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -977,7 +977,6 @@ static unsigned int features[] = { static struct virtio_driver virtio_scsi_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 0b65de9f2df1..95a88f6224cd 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -520,7 +520,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item, { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_tg_pt_gp_info(lun, page); @@ -531,7 +531,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item, { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_tg_pt_gp_info(lun, page, count); @@ -542,7 +542,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_offline_bit(lun, page); @@ -553,7 +553,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_offline_bit(lun, page, count); @@ -564,7 +564,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_secondary_status(lun, page); @@ -575,7 +575,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_secondary_status(lun, page, count); @@ -586,7 +586,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_show( { 
struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_secondary_write_metadata(lun, page); @@ -597,7 +597,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_secondary_write_metadata(lun, page, count); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 22703a0dbd07..4c76498d3fb0 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -40,11 +40,11 @@ static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf) * * See spc4r17 section 6.4.2 Table 135 */ - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); if (tg_pt_gp) buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); } static u16 @@ -325,14 +325,14 @@ check_t10_vend_desc: * Get the PROTOCOL IDENTIFIER as defined by spc4r17 * section 7.5.1 Table 362 */ - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); if (!tg_pt_gp) { - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); goto check_lu_gp; } tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); buf[off] = tpg->proto_id << 4; buf[off++] |= 0x1; /* CODE SET == Binary */ diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index 7f656fac503f..5163d60756b7 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -237,6 +237,7 @@ struct brcmuart_priv { u32 rx_err; u32 rx_timeout; u32 rx_abort; + u32 saved_mctrl; }; static struct dentry *brcmuart_debugfs_root; @@ -1133,16 +1134,27 @@ static int brcmuart_remove(struct platform_device *pdev) static int __maybe_unused brcmuart_suspend(struct device *dev) { struct brcmuart_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + struct uart_port *port = &up->port; serial8250_suspend_port(priv->line); clk_disable_unprepare(priv->baud_mux_clk); + /* + * This will prevent resume from enabling RTS before the + * baud rate has been resored. 
+ */ + priv->saved_mctrl = port->mctrl; + port->mctrl = 0; + return 0; } static int __maybe_unused brcmuart_resume(struct device *dev) { struct brcmuart_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + struct uart_port *port = &up->port; int ret; ret = clk_prepare_enable(priv->baud_mux_clk); @@ -1165,6 +1177,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev) start_rx_dma(serial8250_get_port(priv->line)); } serial8250_resume_port(priv->line); + port->mctrl = priv->saved_mctrl; return 0; } diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 5d43de143f33..60f8fffdfd77 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1324,29 +1324,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud, { int scr; int lcr; - int actual_baud; - int tolerance; - for (scr = 5 ; scr <= 15 ; scr++) { - actual_baud = 921600 * 16 / scr; - tolerance = actual_baud / 50; + for (scr = 16; scr > 4; scr--) { + unsigned int maxrate = port->uartclk / scr; + unsigned int divisor = max(maxrate / baud, 1U); + int delta = maxrate / divisor - baud; - if ((baud < actual_baud + tolerance) && - (baud > actual_baud - tolerance)) { + if (baud > maxrate + baud / 50) + continue; + if (delta > baud / 50) + divisor++; + + if (divisor > 0xffff) + continue; + + /* Update delta due to possible divisor change */ + delta = maxrate / divisor - baud; + if (abs(delta) < baud / 50) { lcr = serial_port_in(port, UART_LCR); serial_port_out(port, UART_LCR, lcr | 0x80); - - serial_port_out(port, UART_DLL, 1); - serial_port_out(port, UART_DLM, 0); + serial_port_out(port, UART_DLL, divisor & 0xff); + serial_port_out(port, UART_DLM, divisor >> 8 & 0xff); serial_port_out(port, 2, 16 - scr); serial_port_out(port, UART_LCR, lcr); return; - } else if (baud > actual_baud) { - break; } } - serial8250_do_set_divisor(port, baud, quot, quot_frac); } static int pci_pericom_setup(struct serial_private *priv, const struct pciserial_board *board, @@ -2291,7 +2295,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = { .setup = pci_pericom_setup_four_at_eight, }, { - .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + .vendor = PCI_VENDOR_ID_ACCESIO, .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, @@ -2299,6 +2303,13 @@ static struct pci_serial_quirk pci_serial_quirks[] = { }, { .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup_four_at_eight, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 5775cbff8f6e..46e2079ad1aa 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2024,13 +2024,6 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl) struct uart_8250_port *up = up_to_u8250p(port); unsigned char mcr; - if (port->rs485.flags & SER_RS485_ENABLED) { - if (serial8250_in_MCR(up) & UART_MCR_RTS) - mctrl |= TIOCM_RTS; - else - mctrl &= ~TIOCM_RTS; - } - mcr = serial8250_TIOCM_to_MCR(mctrl); mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 6ff94cfcd9db..fc543ac97c13 100644 --- a/drivers/tty/serial/Kconfig +++ 
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 6ff94cfcd9db..fc543ac97c13 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1533,7 +1533,7 @@ config SERIAL_LITEUART tristate "LiteUART serial port support" depends on HAS_IOMEM depends on OF || COMPILE_TEST - depends on LITEX + depends on LITEX || COMPILE_TEST select SERIAL_CORE help This driver is for the FPGA-based LiteUART serial controller from LiteX diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index d361cd84ff8c..52518a606c06 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2947,6 +2947,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = { { "ARMH0011", 0 }, + { "ARMHB000", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index b1e7190ae483..ac5112def40d 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2625,6 +2625,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup); EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c index dbc0559a9157..2941659e5274 100644 --- a/drivers/tty/serial/liteuart.c +++ b/drivers/tty/serial/liteuart.c @@ -270,8 +270,10 @@ static int liteuart_probe(struct platform_device *pdev) /* get membase */ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); - if (IS_ERR(port->membase)) - return PTR_ERR(port->membase); + if (IS_ERR(port->membase)) { + ret = PTR_ERR(port->membase); + goto err_erase_id; + } /* values not from device tree */ port->dev = &pdev->dev; @@ -285,7 +287,18 @@ static int liteuart_probe(struct platform_device *pdev) port->line = dev_id; spin_lock_init(&port->lock); - return uart_add_one_port(&liteuart_driver, &uart->port); + platform_set_drvdata(pdev, port); + + ret = uart_add_one_port(&liteuart_driver, &uart->port); + if (ret) + goto err_erase_id; + + return 0; + +err_erase_id: + xa_erase(&liteuart_array, uart->id); + + return ret; } static int liteuart_remove(struct platform_device *pdev) @@ -293,6 +306,7 @@ static int liteuart_remove(struct platform_device *pdev) struct uart_port *port = platform_get_drvdata(pdev); struct liteuart_port *uart = to_liteuart_port(port); + uart_remove_one_port(&liteuart_driver, port); xa_erase(&liteuart_array, uart->id); return 0; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index fcef7a961430..489d19274f9a 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -598,6 +598,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port) u32 val; int ret; + if (IS_ENABLED(CONFIG_CONSOLE_POLL)) + return; + if (!dma->chan) return; diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 45e2e4109acd..b6223fab0687 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -1506,7 +1506,7 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = { .fifo_mode_enable_status = false, .uart_max_port = 5, .max_dma_burst_bytes = 4,
- .error_tolerance_low_range = 0, + .error_tolerance_low_range = -4, .error_tolerance_high_range = 4, }; @@ -1517,7 +1517,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = { .fifo_mode_enable_status = false, .uart_max_port = 5, .max_dma_burst_bytes = 4, - .error_tolerance_low_range = 0, + .error_tolerance_low_range = -4, .error_tolerance_high_range = 4, }; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 1e738f265eea..61e3dd0222af 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1075,6 +1075,11 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) goto out; if (!tty_io_error(tty)) { + if (uport->rs485.flags & SER_RS485_ENABLED) { + set &= ~TIOCM_RTS; + clear &= ~TIOCM_RTS; + } + uart_update_mctrl(uport, set, clear); ret = 0; } @@ -1549,6 +1554,7 @@ static void uart_tty_port_shutdown(struct tty_port *port) { struct uart_state *state = container_of(port, struct uart_state, port); struct uart_port *uport = uart_port_check(state); + char *buf; /* * At this point, we stop accepting input. To do this, we @@ -1570,8 +1576,18 @@ static void uart_tty_port_shutdown(struct tty_port *port) */ tty_port_set_suspended(port, 0); - uart_change_pm(state, UART_PM_STATE_OFF); + /* + * Free the transmit buffer. + */ + spin_lock_irq(&uport->lock); + buf = state->xmit.buf; + state->xmit.buf = NULL; + spin_unlock_irq(&uport->lock); + if (buf) + free_page((unsigned long)buf); + + uart_change_pm(state, UART_PM_STATE_OFF); } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index 1f3b4a142212..f9af7ebe003d 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -337,19 +337,6 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); } -static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) -{ - struct cdns3_endpoint *priv_ep = priv_req->priv_ep; - int current_trb = priv_req->start_trb; - - while (current_trb != priv_req->end_trb) { - cdns3_ep_inc_deq(priv_ep); - current_trb = priv_ep->dequeue; - } - - cdns3_ep_inc_deq(priv_ep); -} - /** * cdns3_allow_enable_l1 - enable/disable permits to transition to L1. * @priv_dev: Extended gadget object @@ -1517,10 +1504,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, trb = priv_ep->trb_pool + priv_ep->dequeue; - /* Request was dequeued and TRB was changed to TRB_LINK. */ - if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { + /* The TRB was changed to a link TRB, and the request was handled at ep_dequeue */ + while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { trace_cdns3_complete_trb(priv_ep, trb); - cdns3_move_deq_to_next_trb(priv_req); + cdns3_ep_inc_deq(priv_ep); + trb = priv_ep->trb_pool + priv_ep->dequeue; } if (!request->stream_id) { diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c index ad9aee3f1e39..97866bfb2da9 100644 --- a/drivers/usb/cdns3/cdnsp-mem.c +++ b/drivers/usb/cdns3/cdnsp-mem.c @@ -987,6 +987,9 @@ int cdnsp_endpoint_init(struct cdnsp_device *pdev, /* Set up the endpoint ring.
*/ pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags); + if (!pep->ring) + return -ENOMEM; + pep->skip = false; /* Fill the endpoint context */ diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index 84dadfa726aa..9643b905e2d8 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -10,6 +10,7 @@ */ #include <linux/platform_device.h> +#include <linux/slab.h> #include "core.h" #include "drd.h" #include "host-export.h" diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 8239fe7129dd..019351c0b52c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -434,6 +434,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1532, 0x0116), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */ + { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM }, + /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */ { USB_DEVICE(0x17ef, 0xa012), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 311597bba80e..eaa49aef2935 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -366,7 +366,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, /* Must be called with xhci->lock held, releases and aquires lock back */ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) { - u32 temp_32; + struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; + union xhci_trb *new_deq = xhci->cmd_ring->dequeue; + u64 crcr; int ret; xhci_dbg(xhci, "Abort command ring\n"); @@ -375,13 +377,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) /* * The control bits like command stop, abort are located in lower - * dword of the command ring control register. Limit the write - * to the lower dword to avoid corrupting the command ring pointer - * in case if the command ring is stopped by the time upper dword - * is written. + * dword of the command ring control register. + * Some controllers require all 64 bits to be written to abort the ring. + * Make sure the upper dword is valid, pointing to the next command, + * avoiding corrupting the command ring pointer in case the command ring + * is stopped by the time the upper dword is written. */ - temp_32 = readl(&xhci->op_regs->cmd_ring); - writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); + next_trb(xhci, NULL, &new_seg, &new_deq); + if (trb_is_link(new_deq)) + next_trb(xhci, NULL, &new_seg, &new_deq); + + crcr = xhci_trb_virt_to_dma(new_seg, new_deq); + xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the * completion of the Command Abort operation. If CRR is not negated in 5 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 7f2f3ff1b391..6010b9901126 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -4110,11 +4110,7 @@ static void run_state_machine(struct tcpm_port *port) tcpm_try_src(port) ? 
SRC_TRY : SNK_ATTACHED, 0); - else - /* Wait for VBUS, but not forever */ - tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON); break; - case SRC_TRY: port->try_src_count++; tcpm_set_cc(port, tcpm_rp_cc(port)); diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 5f484fff8dbe..41b0cd17fcba 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -591,8 +591,11 @@ static void vdpasim_free(struct vdpa_device *vdpa) vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); } - put_iova_domain(&vdpasim->iova); - iova_cache_put(); + if (vdpa_get_dma_dev(vdpa)) { + put_iova_domain(&vdpasim->iova); + iova_cache_put(); + } + kvfree(vdpasim->buffer); if (vdpasim->iommu) vhost_iotlb_free(vdpasim->iommu); diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c index 56cd551e0e04..362f91ec8845 100644 --- a/drivers/vfio/pci/vfio_pci_igd.c +++ b/drivers/vfio/pci/vfio_pci_igd.c @@ -98,7 +98,8 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev, version = cpu_to_le16(0x0201); if (igd_opregion_shift_copy(buf, &off, - &version + (pos - OPREGION_VERSION), + (u8 *)&version + + (pos - OPREGION_VERSION), &pos, &remaining, bytes)) return -EFAULT; } @@ -121,7 +122,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev, OPREGION_SIZE : 0); if (igd_opregion_shift_copy(buf, &off, - &rvda + (pos - OPREGION_RVDA), + (u8 *)&rvda + (pos - OPREGION_RVDA), &pos, &remaining, bytes)) return -EFAULT; } diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 82fb75464f92..735d1d344af9 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -232,7 +232,7 @@ static inline bool vfio_iommu_driver_allowed(struct vfio_container *container, } #endif /* CONFIG_VFIO_NOIOMMU */ -/** +/* * IOMMU driver registration */ int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops) @@ -285,7 +285,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, unsigned long action, void *data); static void vfio_group_get(struct vfio_group *group); -/** +/* * Container objects - containers are created when /dev/vfio/vfio is * opened, but their lifecycle extends until the last user is done, so * it's freed via kref. Must support container/group/device being @@ -309,7 +309,7 @@ static void vfio_container_put(struct vfio_container *container) kref_put(&container->kref, vfio_container_release); } -/** +/* * Group objects - create, release, get, put, search */ static struct vfio_group * @@ -488,7 +488,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev) return group; } -/** +/* * Device objects - create, release, get, put, search */ /* Device reference always implies a group reference */ @@ -595,7 +595,7 @@ static int vfio_dev_viable(struct device *dev, void *data) return ret; } -/** +/* * Async device support */ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) @@ -689,7 +689,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, return NOTIFY_OK; } -/** +/* * VFIO driver API */ void vfio_init_group_dev(struct vfio_device *device, struct device *dev, @@ -831,7 +831,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev); -/** +/* * Get a reference to the vfio_device for a device. Even if the * caller thinks they own the device, they could be racing with a * release call path, so we can't trust drvdata for the shortcut. 
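[Editor's note] The drivers/vfio/vfio.c hunks on either side of this point all make the same one-character change: banner comments that opened with "/**" now open with "/*". In kernel style, "/**" is reserved for kernel-doc comments that scripts/kernel-doc parses, so a "/**" banner with no function name and no @parameter lines can make the documentation build complain. A minimal sketch of the two forms, using a hypothetical function rather than anything from this patch:

/**
 * add_one - increment a value
 * @x: the value to increment
 *
 * A kernel-doc comment: it opens with "/**" and follows the
 * name/@parameter/Return: structure that scripts/kernel-doc expects.
 *
 * Return: @x plus one.
 */
static int add_one(int x)
{
        return x + 1;
}

/*
 * Helper functions - a plain banner that describes a section rather
 * than a single function, so it opens with a plain comment marker.
 */
int main(void)
{
        return add_one(41) == 42 ? 0 : 1;
}

The vfio.c comments touched here document sections ("Group objects", "Device objects", "Module/class support"), not individual functions, which is why the patch demotes them to plain comments.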
@@ -965,7 +965,7 @@ void vfio_unregister_group_dev(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_unregister_group_dev); -/** +/* * VFIO base fd, /dev/vfio/vfio */ static long vfio_ioctl_check_extension(struct vfio_container *container, @@ -1183,7 +1183,7 @@ static const struct file_operations vfio_fops = { .compat_ioctl = compat_ptr_ioctl, }; -/** +/* * VFIO Group fd, /dev/vfio/$GROUP */ static void __vfio_group_unset_container(struct vfio_group *group) @@ -1536,7 +1536,7 @@ static const struct file_operations vfio_group_fops = { .release = vfio_group_fops_release, }; -/** +/* * VFIO Device fd */ static int vfio_device_fops_release(struct inode *inode, struct file *filep) @@ -1611,7 +1611,7 @@ static const struct file_operations vfio_device_fops = { .mmap = vfio_device_fops_mmap, }; -/** +/* * External user API, exported by symbols to be linked dynamically. * * The protocol includes: @@ -1659,7 +1659,7 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep) } EXPORT_SYMBOL_GPL(vfio_group_get_external_user); -/** +/* * External user API, exported by symbols to be linked dynamically. * The external user passes in a device pointer * to verify that: @@ -1725,7 +1725,7 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) } EXPORT_SYMBOL_GPL(vfio_external_check_extension); -/** +/* * Sub-module support */ /* @@ -2272,7 +2272,7 @@ struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group) } EXPORT_SYMBOL_GPL(vfio_group_iommu_domain); -/** +/* * Module/class support */ static char *vfio_devnode(struct device *dev, umode_t *mode) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 01c59ce7e250..29cced1cd277 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1014,12 +1014,12 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) mutex_lock(&d->mutex); filep->private_data = NULL; + vhost_vdpa_clean_irq(v); vhost_vdpa_reset(v); vhost_dev_stop(&v->vdev); vhost_vdpa_iotlb_free(v); vhost_vdpa_free_domain(v); vhost_vdpa_config_put(v); - vhost_vdpa_clean_irq(v); vhost_dev_cleanup(&v->vdev); kfree(v->vdev.vqs); mutex_unlock(&d->mutex); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 938aefbc75ec..d6ca1c7ad513 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -511,8 +511,6 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) vhost_disable_notify(&vsock->dev, vq); do { - u32 len; - if (!vhost_vsock_more_replies(vsock)) { /* Stop tx until the device processes already * pending replies. 
Leave tx virtqueue @@ -540,7 +538,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) continue; } - len = pkt->len; + total_len += sizeof(pkt->hdr) + pkt->len; /* Deliver to monitoring devices all received packets */ virtio_transport_deliver_tap_pkt(pkt); @@ -553,9 +551,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) else virtio_transport_free_pkt(pkt); - len += sizeof(pkt->hdr); - vhost_add_used(vq, head, len); - total_len += len; + vhost_add_used(vq, head, 0); added = true; } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index ef9c57ce0906..9a49ea6b5112 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -366,11 +366,17 @@ static void vgacon_init(struct vc_data *c, int init) struct uni_pagedir *p; /* - * We cannot be loaded as a module, therefore init is always 1, - * but vgacon_init can be called more than once, and init will - * not be 1. + * We cannot be loaded as a module, therefore init will be 1 + * if we are the default console, however if we are a fallback + * console, for example if fbcon has failed registration, then + * init will be 0, so we need to make sure our boot parameters + * have been copied to the console structure for vgacon_resize + * ultimately called by vc_resize. Any subsequent calls to + * vgacon_init init will have init set to 0 too. */ c->vc_can_do_color = vga_can_do_color; + c->vc_scan_lines = vga_scan_lines; + c->vc_font.height = c->vc_cell_height = vga_video_font_height; /* set dimensions manually if init != 0 since vc_resize() will fail */ if (init) { @@ -379,8 +385,6 @@ static void vgacon_init(struct vc_data *c, int init) } else vc_resize(c, vga_video_num_columns, vga_video_num_lines); - c->vc_scan_lines = vga_scan_lines; - c->vc_font.height = c->vc_cell_height = vga_video_font_height; c->vc_complement_mask = 0x7700; if (vga_512_chars) c->vc_hi_font_mask = 0x0800; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 00f64f2f8b72..6d2614e34470 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -14,9 +14,6 @@ #include <linux/spinlock.h> #include <xen/xen.h> -static bool force_used_validation = false; -module_param(force_used_validation, bool, 0444); - #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ @@ -185,9 +182,6 @@ struct vring_virtqueue { } packed; }; - /* Per-descriptor in buffer length */ - u32 *buflen; - /* How to notify other side. FIXME: commonalize hcalls! */ bool (*notify)(struct virtqueue *vq); @@ -496,7 +490,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, unsigned int i, n, avail, descs_used, prev, err_idx; int head; bool indirect; - u32 buflen = 0; START_USE(vq); @@ -578,7 +571,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, indirect); - buflen += sg->length; } } /* Last one doesn't continue. */ @@ -618,10 +610,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, else vq->split.desc_state[head].indir_desc = ctx; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[head] = buflen; - /* Put entry in available array (but don't update avail->idx until they * do sync). 
*/ avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); @@ -796,11 +784,6 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } - if (vq->buflen && unlikely(*len > vq->buflen[i])) { - BAD_RING(vq, "used len %d is larger than in buflen %u\n", - *len, vq->buflen[i]); - return NULL; - } /* detach_buf_split clears data, so grab it now. */ ret = vq->split.desc_state[i].data; @@ -1079,7 +1062,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, unsigned int i, n, err_idx; u16 head, id; dma_addr_t addr; - u32 buflen = 0; head = vq->packed.next_avail_idx; desc = alloc_indirect_packed(total_sg, gfp); @@ -1109,8 +1091,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, desc[i].addr = cpu_to_le64(addr); desc[i].len = cpu_to_le32(sg->length); i++; - if (n >= out_sgs) - buflen += sg->length; } } @@ -1164,10 +1144,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, vq->packed.desc_state[id].indir_desc = desc; vq->packed.desc_state[id].last = id; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[id] = buflen; - vq->num_added += 1; pr_debug("Added buffer head %i to %p\n", head, vq); @@ -1203,7 +1179,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, __le16 head_flags, flags; u16 head, id, prev, curr, avail_used_flags; int err; - u32 buflen = 0; START_USE(vq); @@ -1283,8 +1258,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, 1 << VRING_PACKED_DESC_F_AVAIL | 1 << VRING_PACKED_DESC_F_USED; } - if (n >= out_sgs) - buflen += sg->length; } } @@ -1304,10 +1277,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, vq->packed.desc_state[id].indir_desc = ctx; vq->packed.desc_state[id].last = prev; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[id] = buflen; - /* * A driver MUST NOT make the first descriptor in the list * available before all subsequent descriptors comprising @@ -1494,11 +1463,6 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, BAD_RING(vq, "id %u is not a head!\n", id); return NULL; } - if (vq->buflen && unlikely(*len > vq->buflen[id])) { - BAD_RING(vq, "used len %d is larger than in buflen %u\n", - *len, vq->buflen[id]); - return NULL; - } /* detach_buf_packed clears data, so grab it now. */ ret = vq->packed.desc_state[id].data; @@ -1704,7 +1668,6 @@ static struct virtqueue *vring_create_virtqueue_packed( struct vring_virtqueue *vq; struct vring_packed_desc *ring; struct vring_packed_desc_event *driver, *device; - struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr; size_t ring_size_in_bytes, event_size_in_bytes; @@ -1794,15 +1757,6 @@ static struct virtqueue *vring_create_virtqueue_packed( if (!vq->packed.desc_extra) goto err_desc_extra; - if (!drv->suppress_used_validation || force_used_validation) { - vq->buflen = kmalloc_array(num, sizeof(*vq->buflen), - GFP_KERNEL); - if (!vq->buflen) - goto err_buflen; - } else { - vq->buflen = NULL; - } - /* No callback? Tell other side not to bother us. 
*/ if (!callback) { vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; @@ -1815,8 +1769,6 @@ static struct virtqueue *vring_create_virtqueue_packed( spin_unlock(&vdev->vqs_list_lock); return &vq->vq; -err_buflen: - kfree(vq->packed.desc_extra); err_desc_extra: kfree(vq->packed.desc_state); err_desc_state: @@ -2224,7 +2176,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, void (*callback)(struct virtqueue *), const char *name) { - struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); struct vring_virtqueue *vq; if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) @@ -2284,15 +2235,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, if (!vq->split.desc_extra) goto err_extra; - if (!drv->suppress_used_validation || force_used_validation) { - vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen), - GFP_KERNEL); - if (!vq->buflen) - goto err_buflen; - } else { - vq->buflen = NULL; - } - /* Put everything in free lists. */ vq->free_head = 0; memset(vq->split.desc_state, 0, vring.num * @@ -2303,8 +2245,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, spin_unlock(&vdev->vqs_list_lock); return &vq->vq; -err_buflen: - kfree(vq->split.desc_extra); err_extra: kfree(vq->split.desc_state); err_state: diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6b705026da1a..18448dbd762a 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1562,6 +1562,10 @@ smbd_connected: /* fscache server cookies are based on primary channel only */ if (!CIFS_SERVER_IS_CHAN(tcp_ses)) cifs_fscache_get_client_cookie(tcp_ses); +#ifdef CONFIG_CIFS_FSCACHE + else + tcp_ses->fscache = tcp_ses->primary_server->fscache; +#endif /* CONFIG_CIFS_FSCACHE */ /* queue echo request delayed work */ queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval); @@ -3046,12 +3050,6 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx) cifs_dbg(VFS, "read only mount of RW share\n"); /* no need to log a RW mount of a typical RW share */ } - /* - * The cookie is initialized from volume info returned above. - * Inside cifs_fscache_get_super_cookie it checks - * that we do not get super cookie twice. - */ - cifs_fscache_get_super_cookie(tcon); } /* @@ -3426,6 +3424,7 @@ static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list */ mount_put_conns(mnt_ctx); mount_get_dfs_conns(mnt_ctx); + set_root_ses(mnt_ctx); full_path = build_unc_path_to_root(ctx, cifs_sb, true); if (IS_ERR(full_path)) diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index 7e409a38a2d7..003c5f1f4dfb 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -16,14 +16,7 @@ * Key layout of CIFS server cache index object */ struct cifs_server_key { - struct { - uint16_t family; /* address family */ - __be16 port; /* IP port */ - } hdr; - union { - struct in_addr ipv4_addr; - struct in6_addr ipv6_addr; - }; + __u64 conn_id; } __packed; /* @@ -31,42 +24,23 @@ struct cifs_server_key { */ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) { - const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr; - const struct sockaddr_in *addr = (struct sockaddr_in *) sa; - const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa; struct cifs_server_key key; - uint16_t key_len = sizeof(key.hdr); - - memset(&key, 0, sizeof(key)); /* - * Should not be a problem as sin_family/sin6_family overlays - * sa_family field + * Check if cookie was already initialized so don't reinitialize it. 
+ * In the future, as we integrate with newer fscache features, + * we may want to instead add a check if cookie has changed */ - key.hdr.family = sa->sa_family; - switch (sa->sa_family) { - case AF_INET: - key.hdr.port = addr->sin_port; - key.ipv4_addr = addr->sin_addr; - key_len += sizeof(key.ipv4_addr); - break; - - case AF_INET6: - key.hdr.port = addr6->sin6_port; - key.ipv6_addr = addr6->sin6_addr; - key_len += sizeof(key.ipv6_addr); - break; - - default: - cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family); - server->fscache = NULL; + if (server->fscache) return; - } + + memset(&key, 0, sizeof(key)); + key.conn_id = server->conn_id; server->fscache = fscache_acquire_cookie(cifs_fscache_netfs.primary_index, &cifs_fscache_server_index_def, - &key, key_len, + &key, sizeof(key), NULL, 0, server, 0, true); cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", @@ -92,7 +66,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) * In the future, as we integrate with newer fscache features, * we may want to instead add a check if cookie has changed */ - if (tcon->fscache == NULL) + if (tcon->fscache) return; sharename = extract_sharename(tcon->treeName); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 82848412ad85..96d083db1737 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1376,6 +1376,13 @@ iget_no_retry: inode = ERR_PTR(rc); } + /* + * The cookie is initialized from volume info returned above. + * Inside cifs_fscache_get_super_cookie it checks + * that we do not get super cookie twice. + */ + cifs_fscache_get_super_cookie(tcon); + out: kfree(path); free_xid(xid); diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index 84da2c280012..ec9a1d780dc1 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -150,7 +150,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, * however in order to avoid some race conditions, add a * DBG_BUGON to observe this in advance. */ - DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp); + DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp); /* last refcount should be connected with its managed pslot. */ erofs_workgroup_unfreeze(grp, 0); @@ -165,15 +165,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, unsigned int freed = 0; unsigned long index; + xa_lock(&sbi->managed_pslots); xa_for_each(&sbi->managed_pslots, index, grp) { /* try to shrink each valid workgroup */ if (!erofs_try_to_release_workgroup(sbi, grp)) continue; + xa_unlock(&sbi->managed_pslots); ++freed; if (!--nr_shrink) - break; + return freed; + xa_lock(&sbi->managed_pslots); } + xa_unlock(&sbi->managed_pslots); return freed; } diff --git a/fs/file.c b/fs/file.c index 8627dacfc424..ad4a8bf3cf10 100644 --- a/fs/file.c +++ b/fs/file.c @@ -858,6 +858,10 @@ loop: file = NULL; else if (!get_file_rcu_many(file, refs)) goto loop; + else if (files_lookup_fd_raw(files, fd) != file) { + fput_many(file, refs); + goto loop; + } } rcu_read_unlock(); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 8dbd6fe66420..44a7a4288956 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1857,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) { - struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, }; unsigned long delay = 0; unsigned long holdtime; unsigned long now = jiffies; @@ -1890,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) * keep the glock until the last strong holder is done with it. 
*/ if (!find_first_strong_holder(gl)) { - if (state == LM_ST_UNLOCKED) - mock_gh.gh_state = LM_ST_EXCLUSIVE; + struct gfs2_holder mock_gh = { + .gh_gl = gl, + .gh_state = (state == LM_ST_UNLOCKED) ? + LM_ST_EXCLUSIVE : state, + .gh_iflags = BIT(HIF_HOLDER) + }; + demote_incompat_holders(gl, &mock_gh); } handle_callback(gl, state, delay, true); diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 6424b903e885..89905f4f29bb 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops; static const struct inode_operations gfs2_dir_iops; static const struct inode_operations gfs2_symlink_iops; -static int iget_test(struct inode *inode, void *opaque) -{ - u64 no_addr = *(u64 *)opaque; - - return GFS2_I(inode)->i_no_addr == no_addr; -} - -static int iget_set(struct inode *inode, void *opaque) -{ - u64 no_addr = *(u64 *)opaque; - - GFS2_I(inode)->i_no_addr = no_addr; - inode->i_ino = no_addr; - return 0; -} - -static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) -{ - struct inode *inode; - -repeat: - inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr); - if (!inode) - return inode; - if (is_bad_inode(inode)) { - iput(inode); - goto repeat; - } - return inode; -} - /** * gfs2_set_iop - Sets inode operations * @inode: The inode with correct i_mode filled in @@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode) } } +static int iget_test(struct inode *inode, void *opaque) +{ + u64 no_addr = *(u64 *)opaque; + + return GFS2_I(inode)->i_no_addr == no_addr; +} + +static int iget_set(struct inode *inode, void *opaque) +{ + u64 no_addr = *(u64 *)opaque; + + GFS2_I(inode)->i_no_addr = no_addr; + inode->i_ino = no_addr; + return 0; +} + /** * gfs2_inode_lookup - Lookup an inode * @sb: The super block @@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, { struct inode *inode; struct gfs2_inode *ip; - struct gfs2_glock *io_gl = NULL; struct gfs2_holder i_gh; int error; gfs2_holder_mark_uninitialized(&i_gh); - inode = gfs2_iget(sb, no_addr); + inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr); if (!inode) return ERR_PTR(-ENOMEM); @@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, if (inode->i_state & I_NEW) { struct gfs2_sbd *sdp = GFS2_SB(inode); + struct gfs2_glock *io_gl; error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (unlikely(error)) goto fail; - flush_delayed_work(&ip->i_gl->gl_work); - - error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); - if (unlikely(error)) - goto fail; - if (blktype != GFS2_BLKST_UNLINKED) - gfs2_cancel_delete_work(io_gl); if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) { /* * The GL_SKIP flag indicates to skip reading the inode - * block. We read the inode with gfs2_inode_refresh + * block. We read the inode when instantiating it * after possibly checking the block type. 
*/ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, @@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, } } - glock_set_object(ip->i_gl, ip); set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags); - error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); + + error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (unlikely(error)) goto fail; - glock_set_object(ip->i_iopen_gh.gh_gl, ip); + if (blktype != GFS2_BLKST_UNLINKED) + gfs2_cancel_delete_work(io_gl); + error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); gfs2_glock_put(io_gl); - io_gl = NULL; + if (unlikely(error)) + goto fail; /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */ inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1); inode->i_atime.tv_nsec = 0; + glock_set_object(ip->i_gl, ip); + if (type == DT_UNKNOWN) { /* Inode glock must be locked already */ error = gfs2_instantiate(&i_gh); - if (error) + if (error) { + glock_clear_object(ip->i_gl, ip); goto fail; + } } else { ip->i_no_formal_ino = no_formal_ino; inode->i_mode = DT2IF(type); @@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, if (gfs2_holder_initialized(&i_gh)) gfs2_glock_dq_uninit(&i_gh); + glock_set_object(ip->i_iopen_gh.gh_gl, ip); gfs2_set_iop(inode); + unlock_new_inode(inode); } if (no_formal_ino && ip->i_no_formal_ino && no_formal_ino != ip->i_no_formal_ino) { - error = -ESTALE; - if (inode->i_state & I_NEW) - goto fail; iput(inode); - return ERR_PTR(error); + return ERR_PTR(-ESTALE); } - if (inode->i_state & I_NEW) - unlock_new_inode(inode); - return inode; fail: - if (gfs2_holder_initialized(&ip->i_iopen_gh)) { - glock_clear_object(ip->i_iopen_gh.gh_gl, ip); + if (gfs2_holder_initialized(&ip->i_iopen_gh)) gfs2_glock_dq_uninit(&ip->i_iopen_gh); - } - if (io_gl) - gfs2_glock_put(io_gl); if (gfs2_holder_initialized(&i_gh)) gfs2_glock_dq_uninit(&i_gh); iget_failed(inode); @@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (error) goto fail_free_inode; - flush_delayed_work(&ip->i_gl->gl_work); error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) goto fail_free_inode; gfs2_cancel_delete_work(io_gl); + error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr); + BUG_ON(error); + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); if (error) goto fail_gunlock2; - glock_set_object(ip->i_gl, ip); error = gfs2_trans_begin(sdp, blocks, 0); if (error) goto fail_gunlock2; @@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (error) goto fail_gunlock2; + glock_set_object(ip->i_gl, ip); glock_set_object(io_gl, ip); gfs2_set_iop(inode); - insert_inode_hash(inode); free_vfs_inode = 0; /* After this point, the inode is no longer considered free. 
Any failures need to undo @@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_glock_dq_uninit(ghs + 1); gfs2_glock_put(io_gl); gfs2_qa_put(dip); + unlock_new_inode(inode); return error; fail_gunlock3: + glock_clear_object(ip->i_gl, ip); glock_clear_object(io_gl, ip); gfs2_glock_dq_uninit(&ip->i_iopen_gh); fail_gunlock2: - glock_clear_object(io_gl, ip); gfs2_glock_put(io_gl); fail_free_inode: if (ip->i_gl) { - glock_clear_object(ip->i_gl, ip); if (free_vfs_inode) /* else evict will do the put for us */ gfs2_glock_put(ip->i_gl); } @@ -829,7 +807,10 @@ fail_gunlock: mark_inode_dirty(inode); set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED, &GFS2_I(inode)->i_flags); - iput(inode); + if (inode->i_state & I_NEW) + iget_failed(inode); + else + iput(inode); } if (gfs2_holder_initialized(ghs + 1)) gfs2_glock_dq_uninit(ghs + 1); diff --git a/fs/io-wq.c b/fs/io-wq.c index 88202de519f6..50cf9f92da36 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -714,6 +714,13 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data) static inline bool io_should_retry_thread(long err) { + /* + * Prevent perpetual task_work retry, if the task (or its group) is + * exiting. + */ + if (fatal_signal_pending(current)) + return false; + switch (err) { case -EAGAIN: case -ERESTARTSYS: diff --git a/fs/io_uring.c b/fs/io_uring.c index a4c508a1e0cf..c4f217613f56 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl) static bool io_match_task(struct io_kiocb *head, struct task_struct *task, bool cancel_all) + __must_hold(&req->ctx->timeout_lock) { struct io_kiocb *req; @@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task, return false; } +static bool io_match_linked(struct io_kiocb *head) +{ + struct io_kiocb *req; + + io_for_each_link(req, head) { + if (req->flags & REQ_F_INFLIGHT) + return true; + } + return false; +} + +/* + * As io_match_task() but protected against racing with linked timeouts. + * User must not hold timeout_lock. 
+ */ +static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, + bool cancel_all) +{ + bool matched; + + if (task && head->task != task) + return false; + if (cancel_all) + return true; + + if (head->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = head->ctx; + + /* protect against races with linked timeouts */ + spin_lock_irq(&ctx->timeout_lock); + matched = io_match_linked(head); + spin_unlock_irq(&ctx->timeout_lock); + } else { + matched = io_match_linked(head); + } + return matched; +} + static inline bool req_has_async_data(struct io_kiocb *req) { return req->flags & REQ_F_ASYNC_DATA; @@ -4327,6 +4366,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, kfree(nxt); if (++i == nbufs) return i; + cond_resched(); } i++; kfree(buf); @@ -5699,17 +5739,15 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, int posted = 0, i; spin_lock(&ctx->completion_lock); - spin_lock_irq(&ctx->timeout_lock); for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { struct hlist_head *list; list = &ctx->cancel_hash[i]; hlist_for_each_entry_safe(req, tmp, list, hash_node) { - if (io_match_task(req, tsk, cancel_all)) + if (io_match_task_safe(req, tsk, cancel_all)) posted += io_poll_remove_one(req); } } - spin_unlock_irq(&ctx->timeout_lock); spin_unlock(&ctx->completion_lock); if (posted) @@ -6158,6 +6196,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) return -EFAULT; + if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0) + return -EINVAL; + data->mode = io_translate_timeout_mode(flags); hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode); @@ -6882,10 +6923,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx, static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) { struct io_kiocb *prev = req->timeout.prev; - int ret; + int ret = -ENOENT; if (prev) { - ret = io_try_cancel_userdata(req, prev->user_data); + if (!(req->task->flags & PF_EXITING)) + ret = io_try_cancel_userdata(req, prev->user_data); io_req_complete_post(req, ret ?: -ETIME, 0); io_put_req(prev); } else { @@ -9257,10 +9299,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) struct io_buffer *buf; unsigned long index; - xa_for_each(&ctx->io_buffers, index, buf) { + xa_for_each(&ctx->io_buffers, index, buf) __io_remove_buffers(ctx, buf, index, -1U); - cond_resched(); - } } static void io_req_caches_free(struct io_ring_ctx *ctx) @@ -9564,19 +9604,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_task_cancel *cancel = data; - bool ret; - - if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) { - struct io_ring_ctx *ctx = req->ctx; - /* protect against races with linked timeouts */ - spin_lock_irq(&ctx->timeout_lock); - ret = io_match_task(req, cancel->task, cancel->all); - spin_unlock_irq(&ctx->timeout_lock); - } else { - ret = io_match_task(req, cancel->task, cancel->all); - } - return ret; + return io_match_task_safe(req, cancel->task, cancel->all); } static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, @@ -9587,14 +9616,12 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, LIST_HEAD(list); spin_lock(&ctx->completion_lock); - spin_lock_irq(&ctx->timeout_lock); list_for_each_entry_reverse(de, &ctx->defer_list, list) { - if (io_match_task(de->req, task, cancel_all)) { + if 
(io_match_task_safe(de->req, task, cancel_all)) { list_cut_position(&list, &ctx->defer_list, &de->list); break; } } - spin_unlock_irq(&ctx->timeout_lock); spin_unlock(&ctx->completion_lock); if (list_empty(&list)) return false; diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 1753c26c8e76..71a36ae120ee 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -205,7 +205,16 @@ struct iomap_readpage_ctx { struct readahead_control *rac; }; -static loff_t iomap_read_inline_data(const struct iomap_iter *iter, +/** + * iomap_read_inline_data - copy inline data into the page cache + * @iter: iteration structure + * @page: page to copy to + * + * Copy the inline data in @iter into @page and zero out the rest of the page. + * Only a single IOMAP_INLINE extent is allowed at the end of each file. + * Returns zero for success to complete the read, or the usual negative errno. + */ +static int iomap_read_inline_data(const struct iomap_iter *iter, struct page *page) { const struct iomap *iomap = iomap_iter_srcmap(iter); @@ -214,7 +223,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter, void *addr; if (PageUptodate(page)) - return PAGE_SIZE - poff; + return 0; if (WARN_ON_ONCE(size > PAGE_SIZE - poff)) return -EIO; @@ -231,7 +240,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter, memset(addr + size, 0, PAGE_SIZE - poff - size); kunmap_local(addr); iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff); - return PAGE_SIZE - poff; + return 0; } static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, @@ -257,7 +266,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, sector_t sector; if (iomap->type == IOMAP_INLINE) - return min(iomap_read_inline_data(iter, page), length); + return iomap_read_inline_data(iter, page); /* zero post-eof blocks as the page may be mapped */ iop = iomap_page_create(iter->inode, page); @@ -370,6 +379,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter, ctx->cur_page_in_bio = false; } ret = iomap_readpage_iter(iter, ctx, done); + if (ret <= 0) + return ret; } return done; @@ -580,15 +591,10 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, static int iomap_write_begin_inline(const struct iomap_iter *iter, struct page *page) { - int ret; - /* needs more work for the tailpacking case; disable for now */ if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0)) return -EIO; - ret = iomap_read_inline_data(iter, page); - if (ret < 0) - return ret; - return 0; + return iomap_read_inline_data(iter, page); } static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos, diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index 121f8e8c70ac..49c9da37315c 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -1697,8 +1697,10 @@ int smb2_sess_setup(struct ksmbd_work *work) negblob_off = le16_to_cpu(req->SecurityBufferOffset); negblob_len = le16_to_cpu(req->SecurityBufferLength); if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) || - negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) - return -EINVAL; + negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) { + rc = -EINVAL; + goto out_err; + } negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId + negblob_off); @@ -4457,6 +4459,12 @@ static void get_file_stream_info(struct ksmbd_work *work, &stat); file_info = (struct smb2_file_stream_info *)rsp->Buffer; + buf_free_len = + smb2_calc_max_out_buf_len(work, 8, + 
le32_to_cpu(req->OutputBufferLength)); + if (buf_free_len < 0) + goto out; + xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (xattr_list_len < 0) { goto out; @@ -4465,12 +4473,6 @@ static void get_file_stream_info(struct ksmbd_work *work, goto out; } - buf_free_len = - smb2_calc_max_out_buf_len(work, 8, - le32_to_cpu(req->OutputBufferLength)); - if (buf_free_len < 0) - goto out; - while (idx < xattr_list_len) { stream_name = xattr_list + idx; streamlen = strlen(stream_name); @@ -4496,8 +4498,10 @@ static void get_file_stream_info(struct ksmbd_work *work, ":%s", &stream_name[XATTR_NAME_STREAM_LEN]); next = sizeof(struct smb2_file_stream_info) + streamlen * 2; - if (next > buf_free_len) + if (next > buf_free_len) { + kfree(stream_buf); break; + } file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes]; streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, @@ -4514,6 +4518,7 @@ static void get_file_stream_info(struct ksmbd_work *work, file_info->NextEntryOffset = cpu_to_le32(next); } +out: if (!S_ISDIR(stat.mode) && buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) { file_info = (struct smb2_file_stream_info *) @@ -4522,14 +4527,13 @@ static void get_file_stream_info(struct ksmbd_work *work, "::$DATA", 7, conn->local_nls, 0); streamlen *= 2; file_info->StreamNameLength = cpu_to_le32(streamlen); - file_info->StreamSize = 0; - file_info->StreamAllocationSize = 0; + file_info->StreamSize = cpu_to_le64(stat.size); + file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9); nbytes += sizeof(struct smb2_file_stream_info) + streamlen; } /* last entry offset should be 0 */ file_info->NextEntryOffset = 0; -out: kvfree(xattr_list); rsp->OutputBufferLength = cpu_to_le32(nbytes); @@ -5068,7 +5072,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work, if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | PROTECTED_DACL_SECINFO | UNPROTECTED_DACL_SECINFO)) { - pr_err("Unsupported addition info: 0x%x)\n", + ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n", addition_info); pntsd->revision = cpu_to_le16(1); diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 9320a42dfaf9..75c76cbb27cc 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work) netfs_rreq_do_write_to_cache(rreq); } -static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq, - bool was_async) +static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq) { - if (was_async) { - rreq->work.func = netfs_rreq_write_to_cache_work; - if (!queue_work(system_unbound_wq, &rreq->work)) - BUG(); - } else { - netfs_rreq_do_write_to_cache(rreq); - } + rreq->work.func = netfs_rreq_write_to_cache_work; + if (!queue_work(system_unbound_wq, &rreq->work)) + BUG(); } /* @@ -558,7 +553,7 @@ again: wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags)) - return netfs_rreq_write_to_cache(rreq, was_async); + return netfs_rreq_write_to_cache(rreq); netfs_rreq_completed(rreq, was_async); } @@ -960,7 +955,7 @@ int netfs_readpage(struct file *file, rreq = netfs_alloc_read_request(ops, netfs_priv, file); if (!rreq) { if (netfs_priv) - ops->cleanup(netfs_priv, folio_file_mapping(folio)); + ops->cleanup(folio_file_mapping(folio), netfs_priv); folio_unlock(folio); return -ENOMEM; } @@ -1008,8 +1003,8 @@ out: } EXPORT_SYMBOL(netfs_readpage); -/** - * netfs_skip_folio_read - prep a folio for writing without reading 
first +/* + * Prepare a folio for writing without reading first * @folio: The folio being prepared * @pos: starting position for the write * @len: length of write @@ -1191,7 +1186,7 @@ have_folio: goto error; have_folio_no_wait: if (netfs_priv) - ops->cleanup(netfs_priv, mapping); + ops->cleanup(mapping, netfs_priv); *_folio = folio; _leave(" = 0"); return 0; @@ -1202,7 +1197,7 @@ error: folio_unlock(folio); folio_put(folio); if (netfs_priv) - ops->cleanup(netfs_priv, mapping); + ops->cleanup(mapping, netfs_priv); _leave(" = %d", ret); return ret; } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index dd53704c3f40..fda530d5e764 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -219,6 +219,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) NFS_INO_DATA_INVAL_DEFER); else if (nfsi->cache_validity & NFS_INO_INVALID_DATA) nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER; + trace_nfs_set_cache_invalid(inode, 0); } EXPORT_SYMBOL_GPL(nfs_set_cache_invalid); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 08355b66e7cb..8b21ff1be717 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -289,7 +289,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len) loff_t newsize = pos + len; loff_t end = newsize - 1; - truncate_pagecache_range(inode, pos, end); + WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping, + pos >> PAGE_SHIFT, end >> PAGE_SHIFT)); + spin_lock(&inode->i_lock); if (newsize > i_size_read(inode)) i_size_write(inode, newsize); diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index c8bad735e4c1..271e5f92ed01 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp, status = decode_clone(xdr); if (status) goto out; - status = decode_getfattr(xdr, res->dst_fattr, res->server); - + decode_getfattr(xdr, res->dst_fattr, res->server); out: res->rpc_status = status; return status; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index ecc4594299d6..f63dfa01001c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1998,6 +1998,10 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status) dprintk("%s: exit with error %d for server %s\n", __func__, -EPROTONOSUPPORT, clp->cl_hostname); return -EPROTONOSUPPORT; + case -ENOSPC: + if (clp->cl_cons_state == NFS_CS_SESSION_INITING) + nfs_mark_client_ready(clp, -EIO); + return -EIO; case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery * in nfs4_exchange_id */ default: diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 21dac847f1e4..b3aee261801e 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -162,6 +162,7 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit); DEFINE_NFS_INODE_EVENT(nfs_fsync_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit); DEFINE_NFS_INODE_EVENT(nfs_access_enter); +DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid); TRACE_EVENT(nfs_access_exit, TP_PROTO( diff --git a/fs/ntfs/Kconfig b/fs/ntfs/Kconfig index 1667a7e590d8..f93e69a61283 100644 --- a/fs/ntfs/Kconfig +++ b/fs/ntfs/Kconfig @@ -52,6 +52,7 @@ config NTFS_DEBUG config NTFS_RW bool "NTFS write support" depends on NTFS_FS + depends on PAGE_SIZE_LESS_THAN_64KB help This enables the partial, but safe, write support in the NTFS driver. 
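[Editor's note] The fs/netfs/read_helper.c hunks above flip the arguments of ops->cleanup() from (netfs_priv, mapping) to (mapping, netfs_priv). The sketch below, using stand-in types rather than the real netfs API, shows why a swap like this can compile silently: one parameter is a void pointer, and void * converts to and from any object pointer type in C without a diagnostic.

#include <stdio.h>

/* stand-in for struct address_space */
struct address_space {
        const char *name;
};

/* same shape as the netfs cleanup hook: (mapping, opaque private data) */
static void cleanup(struct address_space *mapping, void *netfs_priv)
{
        printf("cleanup %s, priv=%p\n", mapping->name, netfs_priv);
}

int main(void)
{
        struct address_space mapping = { .name = "inode mapping" };
        int cookie = 0;
        void *netfs_priv = &cookie;

        cleanup(&mapping, netfs_priv);  /* correct argument order */
        /*
         * cleanup(netfs_priv, &mapping) would also compile without a
         * warning, because both conversions go through void *; only
         * the runtime behaviour is wrong. That is how the swapped
         * calls fixed above went unnoticed.
         */
        return 0;
}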
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 15c2e55d2ed2..39b823ab2564 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -61,15 +61,27 @@ static int seq_open_net(struct inode *inode, struct file *file) } #ifdef CONFIG_NET_NS p->net = net; + netns_tracker_alloc(net, &p->ns_tracker, GFP_KERNEL); #endif return 0; } +static void seq_file_net_put_net(struct seq_file *seq) +{ +#ifdef CONFIG_NET_NS + struct seq_net_private *priv = seq->private; + + put_net_track(priv->net, &priv->ns_tracker); +#else + put_net(&init_net); +#endif +} + static int seq_release_net(struct inode *ino, struct file *f) { struct seq_file *seq = f->private_data; - put_net(seq_file_net(seq)); + seq_file_net_put_net(seq); seq_release_private(ino, f); return 0; } @@ -87,7 +99,8 @@ int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux) #ifdef CONFIG_NET_NS struct seq_net_private *p = priv_data; - p->net = get_net(current->nsproxy->net_ns); + p->net = get_net_track(current->nsproxy->net_ns, &p->ns_tracker, + GFP_KERNEL); #endif return 0; } @@ -97,7 +110,7 @@ void bpf_iter_fini_seq_net(void *priv_data) #ifdef CONFIG_NET_NS struct seq_net_private *p = priv_data; - put_net(p->net); + put_net_track(p->net, &p->ns_tracker); #endif } diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index fbc9d816882c..23523b802539 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -1077,21 +1077,18 @@ xfs_attr_node_hasname( state = xfs_da_state_alloc(args); if (statep != NULL) - *statep = NULL; + *statep = state; /* * Search to see if name exists, and get back a pointer to it. */ error = xfs_da3_node_lookup_int(state, &retval); - if (error) { - xfs_da_state_free(state); - return error; - } + if (error) + retval = error; - if (statep != NULL) - *statep = state; - else + if (!statep) xfs_da_state_free(state); + return retval; } @@ -1112,7 +1109,7 @@ xfs_attr_node_addname_find_attr( */ retval = xfs_attr_node_hasname(args, &dac->da_state); if (retval != -ENOATTR && retval != -EEXIST) - return retval; + goto error; if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE)) goto error; @@ -1337,7 +1334,7 @@ int xfs_attr_node_removename_setup( error = xfs_attr_node_hasname(args, state); if (error != -EEXIST) - return error; + goto out; error = 0; ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL); diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index e1472004170e..da4af2142a2b 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -289,22 +289,6 @@ xfs_perag_clear_inode_tag( trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); } -static inline void -xfs_inew_wait( - struct xfs_inode *ip) -{ - wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); - DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); - - do { - prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); - if (!xfs_iflags_test(ip, XFS_INEW)) - break; - schedule(); - } while (true); - finish_wait(wq, &wait.wq_entry); -} - /* * When we recycle a reclaimable inode, we need to re-initialise the VFS inode * part of the structure. This is made more complex by the fact we store @@ -368,18 +352,13 @@ xfs_iget_recycle( ASSERT(!rwsem_is_locked(&inode->i_rwsem)); error = xfs_reinit_inode(mp, inode); if (error) { - bool wake; - /* * Re-initializing the inode failed, and we are in deep * trouble. Try to re-add it to the reclaim list. 
*/ rcu_read_lock(); spin_lock(&ip->i_flags_lock); - wake = !!__xfs_iflags_test(ip, XFS_INEW); ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); - if (wake) - wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); ASSERT(ip->i_flags & XFS_IRECLAIMABLE); spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 64b9bf334806..6771f357ad2c 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3122,7 +3122,6 @@ xfs_rename( * appropriately. */ if (flags & RENAME_WHITEOUT) { - ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip); if (error) return error; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index e635a3d64cba..c447bf04205a 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -231,8 +231,7 @@ static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip) #define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */ #define XFS_ISTALE (1 << 1) /* inode has been staled */ #define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */ -#define __XFS_INEW_BIT 3 /* inode has just been allocated */ -#define XFS_INEW (1 << __XFS_INEW_BIT) +#define XFS_INEW (1 << 3) /* inode has just been allocated */ #define XFS_IPRESERVE_DM_FIELDS (1 << 4) /* has legacy DMAPI fields set */ #define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */ #define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */ @@ -492,7 +491,6 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip) xfs_iflags_clear(ip, XFS_INEW); barrier(); unlock_new_inode(VFS_I(ip)); - wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); } static inline void xfs_setup_existing_inode(struct xfs_inode *ip) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3de38d7f0ced..0ceb54c6342f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); +int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); #define BPF_DISPATCHER_INIT(_name) { \ .mutex = __MUTEX_INITIALIZER(_name.mutex), \ .func = &_name##_func, \ @@ -1081,7 +1082,7 @@ struct bpf_array { }; #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ -#define MAX_TAIL_CALL_CNT 32 +#define MAX_TAIL_CALL_CNT 33 #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ BPF_F_RDONLY_PROG | \ @@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex; * kprobes, tracepoints) to prevent deadlocks on map operations as any of * these events can happen inside a region which holds a map bucket lock * and can deadlock on it. - * - * Use the preemption safe inc/dec variants on RT because migrate disable - * is preemptible on RT and preemption in the middle of the RMW operation - * might lead to inconsistent state. Use the raw variants for non RT - * kernels as migrate_disable() maps to preempt_disable() so the slightly - * more expensive save operation can be avoided. 
*/ static inline void bpf_disable_instrumentation(void) { migrate_disable(); - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_inc(bpf_prog_active); - else - __this_cpu_inc(bpf_prog_active); + this_cpu_inc(bpf_prog_active); } static inline void bpf_enable_instrumentation(void) { - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_dec(bpf_prog_active); - else - __this_cpu_dec(bpf_prog_active); + this_cpu_dec(bpf_prog_active); migrate_enable(); } @@ -1733,6 +1722,14 @@ bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); const struct btf_func_model * bpf_jit_find_kfunc_model(const struct bpf_prog *prog, const struct bpf_insn *insn); +struct bpf_core_ctx { + struct bpf_verifier_log *log; + const struct btf *btf; +}; + +int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, + int relo_idx, void *insn); + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -2165,6 +2162,7 @@ extern const struct bpf_func_proto bpf_sk_setsockopt_proto; extern const struct bpf_func_proto bpf_sk_getsockopt_proto; extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; extern const struct bpf_func_proto bpf_find_vma_proto; +extern const struct bpf_func_proto bpf_loop_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c8a78e830fca..182b16a91084 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -396,6 +396,13 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) log->level == BPF_LOG_KERNEL); } +static inline bool +bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log) +{ + return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 && + log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK); +} + #define BPF_MAX_SUBPROGS 256 struct bpf_subprog_info { diff --git a/include/linux/btf.h b/include/linux/btf.h index 203eef993d76..0c74348cbc9d 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -144,6 +144,53 @@ static inline bool btf_type_is_enum(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM; } +static inline bool str_is_empty(const char *s) +{ + return !s || !s[0]; +} + +static inline u16 btf_kind(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info); +} + +static inline bool btf_is_enum(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_ENUM; +} + +static inline bool btf_is_composite(const struct btf_type *t) +{ + u16 kind = btf_kind(t); + + return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; +} + +static inline bool btf_is_array(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_ARRAY; +} + +static inline bool btf_is_int(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_INT; +} + +static inline bool btf_is_ptr(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_PTR; +} + +static inline u8 btf_int_offset(const struct btf_type *t) +{ + return BTF_INT_OFFSET(*(u32 *)(t + 1)); +} + +static inline u8 btf_int_encoding(const struct btf_type *t) +{ + return BTF_INT_ENCODING(*(u32 *)(t + 1)); +} + static inline bool btf_type_is_scalar(const struct btf_type *t) { return btf_type_is_int(t) || btf_type_is_enum(t); @@ -184,6 +231,11 @@ static inline u16 btf_type_vlen(const struct btf_type *t) return BTF_INFO_VLEN(t->info); } +static inline u16 btf_vlen(const struct btf_type *t) +{ + return btf_type_vlen(t); +} + static inline u16 btf_func_linkage(const 
struct btf_type *t) { return BTF_INFO_VLEN(t->info); @@ -194,25 +246,54 @@ static inline bool btf_type_kflag(const struct btf_type *t) return BTF_INFO_KFLAG(t->info); } -static inline u32 btf_member_bit_offset(const struct btf_type *struct_type, - const struct btf_member *member) +static inline u32 __btf_member_bit_offset(const struct btf_type *struct_type, + const struct btf_member *member) { return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset) : member->offset; } -static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type, - const struct btf_member *member) +static inline u32 __btf_member_bitfield_size(const struct btf_type *struct_type, + const struct btf_member *member) { return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset) : 0; } +static inline struct btf_member *btf_members(const struct btf_type *t) +{ + return (struct btf_member *)(t + 1); +} + +static inline u32 btf_member_bit_offset(const struct btf_type *t, u32 member_idx) +{ + const struct btf_member *m = btf_members(t) + member_idx; + + return __btf_member_bit_offset(t, m); +} + +static inline u32 btf_member_bitfield_size(const struct btf_type *t, u32 member_idx) +{ + const struct btf_member *m = btf_members(t) + member_idx; + + return __btf_member_bitfield_size(t, m); +} + static inline const struct btf_member *btf_type_member(const struct btf_type *t) { return (const struct btf_member *)(t + 1); } +static inline struct btf_array *btf_array(const struct btf_type *t) +{ + return (struct btf_array *)(t + 1); +} + +static inline struct btf_enum *btf_enum(const struct btf_type *t) +{ + return (struct btf_enum *)(t + 1); +} + static inline const struct btf_var_secinfo *btf_type_var_secinfo( const struct btf_type *t) { @@ -245,7 +326,10 @@ struct kfunc_btf_id_set { struct module *owner; }; -struct kfunc_btf_id_list; +struct kfunc_btf_id_list { + struct list_head list; + struct mutex mutex; +}; #ifdef CONFIG_DEBUG_INFO_BTF_MODULES void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, @@ -254,6 +338,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, struct kfunc_btf_id_set *s); bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, struct module *owner); + +extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; +extern struct kfunc_btf_id_list prog_test_kfunc_list; #else static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, struct kfunc_btf_id_set *s) @@ -268,13 +355,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, { return false; } + +static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused; +static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused; #endif #define DEFINE_KFUNC_BTF_ID_SET(set, name) \ struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \ THIS_MODULE } -extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; -extern struct kfunc_btf_id_list prog_test_kfunc_list; - #endif diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2f909ed084c6..4ff37cb763ae 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -3,7 +3,6 @@ #define _LINUX_CACHEINFO_H #include <linux/bitops.h> -#include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/smp.h> diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index 20b50baf3a02..a81652d1c6f3 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -12,13 +12,6 @@ #define CAN_SYNC_SEG 1 -/* Kilobits and 
Megabits per second */ -#define CAN_KBPS 1000UL -#define CAN_MBPS 1000000UL - -/* Megahertz */ -#define CAN_MHZ 1000000UL - #define CAN_CTRLMODE_TDC_MASK \ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL) diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index a498ebcf4993..15e7c5e15d62 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -18,6 +18,7 @@ #include <linux/klist.h> #include <linux/pm.h> #include <linux/device/bus.h> +#include <linux/module.h> /** * enum probe_type - device driver probe type to try diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 254b165f2b44..939a1beaddf7 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -7,6 +7,7 @@ #include <linux/refcount.h> #include <linux/types.h> +#include <net/dsa.h> struct dsa_switch; struct dsa_port; @@ -37,14 +38,12 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id); int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num); + struct dsa_bridge bridge); void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num); + struct dsa_bridge bridge); -u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num); +u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num); u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp); diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h index 7ee708ad7df2..dca2969015d8 100644 --- a/include/linux/dsa/ocelot.h +++ b/include/linux/dsa/ocelot.h @@ -8,6 +8,7 @@ #include <linux/kthread.h> #include <linux/packing.h> #include <linux/skbuff.h> +#include <net/dsa.h> struct ocelot_skb_cb { struct sk_buff *clone; @@ -168,11 +169,18 @@ struct felix_deferred_xmit_work { struct kthread_work work; }; -struct felix_port { +struct ocelot_8021q_tagger_data { void (*xmit_work_fn)(struct kthread_work *work); - struct kthread_worker *xmit_worker; }; +static inline struct ocelot_8021q_tagger_data * +ocelot_8021q_tagger_data(struct dsa_switch *ds) +{ + BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_OCELOT_8021Q); + + return ds->tagger_data; +} + static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val) { packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0); diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index e6c78be40bde..159e43171ccc 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -35,23 +35,26 @@ #define SJA1105_META_SMAC 0x222222222222ull #define SJA1105_META_DMAC 0x0180C200000Eull -#define SJA1105_HWTS_RX_EN 0 +enum sja1110_meta_tstamp { + SJA1110_META_TSTAMP_TX = 0, + SJA1110_META_TSTAMP_RX = 1, +}; -/* Global tagger data: each struct sja1105_port has a reference to - * the structure defined in struct sja1105_private. 
- */ +struct sja1105_deferred_xmit_work { + struct dsa_port *dp; + struct sk_buff *skb; + struct kthread_work work; +}; + +/* Global tagger data */ struct sja1105_tagger_data { - struct sk_buff *stampable_skb; - /* Protects concurrent access to the meta state machine - * from taggers running on multiple ports on SMP systems - */ - spinlock_t meta_lock; - unsigned long state; - u8 ts_id; - /* Used on SJA1110 where meta frames are generated only for - * 2-step TX timestamps - */ - struct sk_buff_head skb_txtstamp_queue; + /* Tagger to switch */ + void (*xmit_work_fn)(struct kthread_work *work); + void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id, + enum sja1110_meta_tstamp dir, u64 tstamp); + /* Switch to tagger */ + bool (*rxtstamp_get_state)(struct dsa_switch *ds); + void (*rxtstamp_set_state)(struct dsa_switch *ds, bool on); }; struct sja1105_skb_cb { @@ -64,32 +67,13 @@ struct sja1105_skb_cb { #define SJA1105_SKB_CB(skb) \ ((struct sja1105_skb_cb *)((skb)->cb)) -struct sja1105_port { - struct kthread_worker *xmit_worker; - struct kthread_work xmit_work; - struct sk_buff_head xmit_queue; - struct sja1105_tagger_data *data; - bool hwts_tx_en; -}; - -/* Timestamps are in units of 8 ns clock ticks (equivalent to - * a fixed 125 MHz clock). - */ -#define SJA1105_TICK_NS 8 - -static inline s64 ns_to_sja1105_ticks(s64 ns) +static inline struct sja1105_tagger_data * +sja1105_tagger_data(struct dsa_switch *ds) { - return ns / SJA1105_TICK_NS; -} + BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1105 && + ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1110); -static inline s64 sja1105_ticks_to_ns(s64 ticks) -{ - return ticks * SJA1105_TICK_NS; -} - -static inline bool dsa_port_is_sja1105(struct dsa_port *dp) -{ - return true; + return ds->tagger_data; } #endif /* _NET_DSA_SJA1105_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index b6a216eb217a..68c6b5c208e7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -6,6 +6,7 @@ #define __LINUX_FILTER_H__ #include <linux/atomic.h> +#include <linux/bpf.h> #include <linux/refcount.h> #include <linux/compat.h> #include <linux/skbuff.h> @@ -26,7 +27,6 @@ #include <asm/byteorder.h> #include <uapi/linux/filter.h> -#include <uapi/linux/bpf.h> struct sk_buff; struct sock; @@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void * This uses migrate_disable/enable() explicitly to document that the * invocation of a BPF program does not require reentrancy protection * against a BPF program which is invoked from a preempting task. - * - * For non RT enabled kernels migrate_disable/enable() maps to - * preempt_disable/enable(), i.e. it disables also preemption. 
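 *
 * A hedged usage sketch (prog and ctx are hypothetical): a caller simply
 * does
 *
 *	u32 ret = bpf_prog_run_pin_on_cpu(prog, ctx);
 *
 * and needs no extra preemption or migration protection of its own.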
*/ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, const void *ctx) diff --git a/include/linux/hid.h b/include/linux/hid.h index 9e067f937dbc..f453be385bd4 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev, return hdev->ll_driver == driver; } +static inline bool hid_is_usb(struct hid_device *hdev) +{ + return hid_is_using_ll_driver(hdev, &usb_hid_driver); +} + #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h index d984694c384d..d75601d613cc 100644 --- a/include/linux/if_eql.h +++ b/include/linux/if_eql.h @@ -26,6 +26,7 @@ typedef struct slave { struct list_head list; struct net_device *dev; + netdevice_tracker dev_tracker; long priority; long priority_bps; long priority_Bps; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 41a518336673..8420fe504927 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -162,6 +162,7 @@ struct netpoll; * @vlan_id: VLAN identifier * @flags: device flags * @real_dev: underlying netdevice + * @dev_tracker: refcount tracker for @real_dev reference * @real_dev_addr: address of underlying netdevice * @dent: proc dir entry * @vlan_pcpu_stats: ptr to percpu rx stats @@ -177,6 +178,8 @@ struct vlan_dev_priv { u16 flags; struct net_device *real_dev; + netdevice_tracker dev_tracker; + unsigned char real_dev_addr[ETH_ALEN]; struct proc_dir_entry *dent; diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 518b484a7f07..674aeead6260 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -24,6 +24,8 @@ struct ipv4_devconf { struct in_device { struct net_device *dev; + netdevice_tracker dev_tracker; + refcount_t refcnt; int dead; struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e974caf39d3e..8c8f7a4d93af 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -153,6 +153,8 @@ struct kretprobe { struct kretprobe_holder *rph; }; +#define KRETPROBE_MAX_DATA_SIZE 4096 + struct kretprobe_instance { union { struct freelist_node freelist; diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index c6786c12b207..df1fab44ea5c 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -117,4 +117,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev); int mei_cldev_disable(struct mei_cl_device *cldev); bool mei_cldev_enabled(const struct mei_cl_device *cldev); +void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size); +int mei_cldev_dma_unmap(struct mei_cl_device *cldev); + #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a623ec635947..78655d8d13a7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -478,6 +478,10 @@ struct mlx5_fc_stats { unsigned long next_query; unsigned long sampling_interval; /* jiffies */ u32 *bulk_query_out; + int bulk_query_len; + size_t num_counters; + bool bulk_query_alloc_failed; + unsigned long next_bulk_query_alloc; struct mlx5_fc_pool fc_pool; }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 3636df90899a..fbaab440a484 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 regs_84_to_68[0x11]; u8 tracer_registers[0x4]; - u8 regs_63_to_32[0x20]; 
+ u8 regs_63_to_46[0x12]; + u8 mrtc[0x1]; + u8 regs_44_to_32[0xd]; + u8 regs_31_to_0[0x20]; }; diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index 8071148f29a6..e05ee9f001ff 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -12,6 +12,7 @@ /** * struct vif_device - interface representor for multicast routing * @dev: network device being used + * @dev_tracker: refcount tracker for @dev reference * @bytes_in: statistic; bytes ingressing * @bytes_out: statistic; bytes egresing * @pkt_in: statistic; packets ingressing @@ -26,6 +27,7 @@ */ struct vif_device { struct net_device *dev; + netdevice_tracker dev_tracker; unsigned long bytes_in, bytes_out; unsigned long pkt_in, pkt_out; unsigned long rate_limit; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index db3bff1ae7fd..a419718612c6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -48,6 +48,7 @@ #include <uapi/linux/pkt_cls.h> #include <linux/hashtable.h> #include <linux/rbtree.h> +#include <net/net_trackers.h> struct netpoll_info; struct device; @@ -299,7 +300,6 @@ enum netdev_state_t { __LINK_STATE_TESTING, }; - struct gro_list { struct list_head list; int count; @@ -579,6 +579,8 @@ struct netdev_queue { * read-mostly part */ struct net_device *dev; + netdevice_tracker dev_tracker; + struct Qdisc __rcu *qdisc; struct Qdisc *qdisc_sleeping; #ifdef CONFIG_SYSFS @@ -734,6 +736,8 @@ struct netdev_rx_queue { #endif struct kobject kobj; struct net_device *dev; + netdevice_tracker dev_tracker; + #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif @@ -1865,6 +1869,7 @@ enum netdev_ml_priv_type { * @proto_down_reason: reason a netdev interface is held down * @pcpu_refcnt: Number of references to this device * @dev_refcnt: Number of references to this device + * @refcnt_tracker: Tracker directory for tracked references to this device * @todo_list: Delayed register/unregister * @link_watch_list: XXX: need comments on this one * @@ -1938,6 +1943,8 @@ enum netdev_ml_priv_type { * keep a list of interfaces to be deleted. * * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. + * @linkwatch_dev_tracker: refcount tracker used by linkwatch. + * @watchdog_dev_tracker: refcount tracker used by watchdog. * * FIXME: cleanup struct net_device such that network protocol info * moves out. @@ -2178,6 +2185,7 @@ struct net_device { #else refcount_t dev_refcnt; #endif + struct ref_tracker_dir refcnt_tracker; struct list_head link_watch_list; @@ -2267,6 +2275,8 @@ struct net_device { struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; u8 dev_addr_shadow[MAX_ADDR_LEN]; + netdevice_tracker linkwatch_dev_tracker; + netdevice_tracker watchdog_dev_tracker; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2523,6 +2533,7 @@ struct packet_type { __be16 type; /* This is really htons(ether_type). */ bool ignore_outgoing; struct net_device *dev; /* NULL is wildcarded here */ + netdevice_tracker dev_tracker; int (*func) (struct sk_buff *, struct net_device *, struct packet_type *, @@ -3805,6 +3816,7 @@ void netdev_run_todo(void); * @dev: network device * * Release reference to device to allow it to be freed. + * Try using dev_put_track() instead. */ static inline void dev_put(struct net_device *dev) { @@ -3822,6 +3834,7 @@ static inline void dev_put(struct net_device *dev) * @dev: network device * * Hold reference to device to keep it from being freed. + * Try using dev_hold_track() instead. 
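+ *
+ * A hedged conversion sketch (priv and its netdevice_tracker member are
+ * hypothetical):
+ *
+ *	dev_hold_track(dev, &priv->dev_tracker, GFP_KERNEL);
+ *	...
+ *	dev_put_track(dev, &priv->dev_tracker);
+ *
+ * This pairs each reference with a ref_tracker entry so that a leaked
+ * reference can be attributed to its acquisition site.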
*/ static inline void dev_hold(struct net_device *dev) { @@ -3834,6 +3847,55 @@ static inline void dev_hold(struct net_device *dev) } } +static inline void netdev_tracker_alloc(struct net_device *dev, + netdevice_tracker *tracker, gfp_t gfp) +{ +#ifdef CONFIG_NET_DEV_REFCNT_TRACKER + ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); +#endif +} + +static inline void netdev_tracker_free(struct net_device *dev, + netdevice_tracker *tracker) +{ +#ifdef CONFIG_NET_DEV_REFCNT_TRACKER + ref_tracker_free(&dev->refcnt_tracker, tracker); +#endif +} + +static inline void dev_hold_track(struct net_device *dev, + netdevice_tracker *tracker, gfp_t gfp) +{ + if (dev) { + dev_hold(dev); + netdev_tracker_alloc(dev, tracker, gfp); + } +} + +static inline void dev_put_track(struct net_device *dev, + netdevice_tracker *tracker) +{ + if (dev) { + netdev_tracker_free(dev, tracker); + dev_put(dev); + } +} + +static inline void dev_replace_track(struct net_device *odev, + struct net_device *ndev, + netdevice_tracker *tracker, + gfp_t gfp) +{ + if (odev) + netdev_tracker_free(odev, tracker); + + dev_hold(ndev); + dev_put(odev); + + if (ndev) + netdev_tracker_alloc(ndev, tracker, gfp); +} + /* Carrier loss detection, dial on demand. The functions netif_carrier_on * and _off may be called from IRQ context, but it is caller * who is responsible for serialization of these calls. @@ -4053,7 +4115,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) { spin_lock(&txq->_xmit_lock); - txq->xmit_lock_owner = cpu; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, cpu); } static inline bool __netif_tx_acquire(struct netdev_queue *txq) @@ -4070,26 +4133,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq) static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); - txq->xmit_lock_owner = smp_processor_id(); + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); } static inline bool __netif_tx_trylock(struct netdev_queue *txq) { bool ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); + + if (likely(ok)) { + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); + } return ok; } static inline void __netif_tx_unlock(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock(&txq->_xmit_lock); } static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock_bh(&txq->_xmit_lock); } diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index e6a2d72e0dc7..bd19c4b91e31 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -24,6 +24,7 @@ union inet_addr { struct netpoll { struct net_device *dev; + netdevice_tracker dev_tracker; char dev_name[IFNAMSIZ]; const char *name; diff --git a/include/linux/phy.h b/include/linux/phy.h index 1e57cdd95da3..6de8d7a90d78 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -538,11 +538,12 @@ struct macsec_ops; * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY * @state: State of the PHY for management purposes * @dev_flags: Device-specific flags used by the 
PHY driver.
- * Bits [15:0] are free to use by the PHY driver to communicate
- * driver specific behavior.
- * Bits [23:16] are currently reserved for future use.
- * Bits [31:24] are reserved for defining generic
- * PHY driver behavior.
+ *
+ * - Bits [15:0] are free to use by the PHY driver to communicate
+ *   driver specific behavior.
+ * - Bits [23:16] are currently reserved for future use.
+ * - Bits [31:24] are reserved for defining generic
+ *   PHY driver behavior.
 * @irq: IRQ number of the PHY's interrupt (-1 if none)
 * @phy_timer: The timer for handling the state machine
 * @phylink: Pointer to phylink instance for this PHY
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 01224235df0f..a2f266cc3442 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -84,6 +84,8 @@ enum phylink_op_type {
 * struct phylink_config - PHYLINK configuration structure
 * @dev: a pointer to a struct device associated with the MAC
 * @type: operation type of PHYLINK instance
+ * @legacy_pre_march2020: driver has not been updated for March 2020 updates
+ *	(See commit 7cceb599d15d ("net: phylink: avoid mac_config calls"))
 * @pcs_poll: MAC PCS cannot provide link change interrupt
 * @poll_fixed_state: if true, starts link_poll,
 *	if MAC link is at %MLO_AN_FIXED mode.
@@ -97,6 +99,7 @@ enum phylink_op_type {
 struct phylink_config {
 	struct device *dev;
 	enum phylink_op_type type;
+	bool legacy_pre_march2020;
 	bool pcs_poll;
 	bool poll_fixed_state;
 	bool ovr_an_inband;
@@ -187,6 +190,10 @@ void validate(struct phylink_config *config, unsigned long *supported,
 * negotiation completion state in @state->an_complete, and link up state
 * in @state->link. If possible, @state->lp_advertising should also be
 * populated.
+ *
+ * Note: This is a legacy method. This function will not be called unless
+ * legacy_pre_march2020 is set in &struct phylink_config and there is no
+ * PCS attached.
 */
void mac_pcs_get_state(struct phylink_config *config,
		       struct phylink_link_state *state);
@@ -227,6 +234,15 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
 * guaranteed to be correct, and so any mac_config() implementation must
 * never reference these fields.
+ *
+ * Note: For legacy (pre-March 2020) drivers (drivers with legacy_pre_march2020
+ * set in their &phylink_config and which don't have a PCS), this function
+ * will be called on each link up event, and also to change the in-band
+ * advertisement. For non-legacy drivers, it will only be called to
+ * reconfigure the MAC for a "major" change in e.g. interface mode. It will
+ * not be called for changes in speed, duplex or pause modes or to change
+ * the in-band advertisement. In any case, it is strongly preferred that
+ * speed, duplex and pause settings are handled in the mac_link_up() method
+ * and not in this method.
+ *
 * (this requires a rewrite - please refer to mac_link_up() for situations
 * where the PCS and MAC are not tightly integrated.)
 *
@@ -311,6 +327,10 @@ int mac_finish(struct phylink_config *config, unsigned int mode,
/**
 * mac_an_restart() - restart 802.3z BaseX autonegotiation
 * @config: a pointer to a &struct phylink_config.
+ *
+ * Note: This is a legacy method. This function will not be called unless
+ * legacy_pre_march2020 is set in &struct phylink_config and there is no
+ * PCS attached.
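+ *
+ * A hedged example (the priv struct is hypothetical): a driver that still
+ * relies on these legacy methods opts in while initializing its
+ * phylink_config:
+ *
+ *	priv->phylink_config.legacy_pre_march2020 = true;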
*/ void mac_an_restart(struct phylink_config *config); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 0dae7fcc5ef2..6dc4943d8aec 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -652,6 +652,7 @@ struct qed_dev_info { bool wol_support; bool smart_an; + bool esl; /* MBI version */ u32 mbi_version; @@ -807,6 +808,12 @@ struct qed_devlink { struct devlink_health_reporter *fw_reporter; }; +struct qed_sb_info_dbg { + u32 igu_prod; + u32 igu_cons; + u16 pi[PIS_PER_SB]; +}; + struct qed_common_cb_ops { void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); void (*link_update)(void *dev, struct qed_link_output *link); @@ -1194,6 +1201,13 @@ struct qed_common_ops { struct devlink* (*devlink_register)(struct qed_dev *cdev); void (*devlink_unregister)(struct devlink *devlink); + + __printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...); + + int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb, + u16 qid, struct qed_sb_info_dbg *sb_dbg); + + int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active); }; #define MASK_FIELD(_name, _value) \ diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h new file mode 100644 index 000000000000..c11c9db5825c --- /dev/null +++ b/include/linux/ref_tracker.h @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#ifndef _LINUX_REF_TRACKER_H +#define _LINUX_REF_TRACKER_H +#include <linux/refcount.h> +#include <linux/types.h> +#include <linux/spinlock.h> + +struct ref_tracker; + +struct ref_tracker_dir { +#ifdef CONFIG_REF_TRACKER + spinlock_t lock; + unsigned int quarantine_avail; + refcount_t untracked; + struct list_head list; /* List of active trackers */ + struct list_head quarantine; /* List of dead trackers */ +#endif +}; + +#ifdef CONFIG_REF_TRACKER +static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, + unsigned int quarantine_count) +{ + INIT_LIST_HEAD(&dir->list); + INIT_LIST_HEAD(&dir->quarantine); + spin_lock_init(&dir->lock); + dir->quarantine_avail = quarantine_count; + refcount_set(&dir->untracked, 1); +} + +void ref_tracker_dir_exit(struct ref_tracker_dir *dir); + +void ref_tracker_dir_print(struct ref_tracker_dir *dir, + unsigned int display_limit); + +int ref_tracker_alloc(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp, gfp_t gfp); + +int ref_tracker_free(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp); + +#else /* CONFIG_REF_TRACKER */ + +static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, + unsigned int quarantine_count) +{ +} + +static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir) +{ +} + +static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir, + unsigned int display_limit) +{ +} + +static inline int ref_tracker_alloc(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp, + gfp_t gfp) +{ + return 0; +} + +static inline int ref_tracker_free(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp) +{ + return 0; +} + +#endif + +#endif /* _LINUX_REF_TRACKER_H */ diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index bd7a73db2e66..54cf566616ae 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -499,7 +499,8 @@ struct regulator_irq_data { * best to shut-down regulator(s) or reboot the SOC if error * handling is repeatedly failing. 
If fatal_cnt is given the IRQ * handling is aborted if it fails for fatal_cnt times and die() - * callback (if populated) or BUG() is called to try to prevent + * callback (if populated) is called. If die() is not populated + * poweroff for the system is attempted in order to prevent any * further damage. * @reread_ms: The time which is waited before attempting to re-read status * at the worker if IC reading fails. Immediate re-read is done @@ -516,11 +517,12 @@ struct regulator_irq_data { * @data: Driver private data pointer which will be passed as such to * the renable, map_event and die callbacks in regulator_irq_data. * @die: Protection callback. If IC status reading or recovery actions - * fail fatal_cnt times this callback or BUG() is called. This - * callback should implement a final protection attempt like - * disabling the regulator. If protection succeeded this may - * return 0. If anything else is returned the core assumes final - * protection failed and calls BUG() as a last resort. + * fail fatal_cnt times this callback is called or system is + * powered off. This callback should implement a final protection + * attempt like disabling the regulator. If protection succeeded + * die() may return 0. If anything else is returned the core + * assumes final protection failed and attempts to perform a + * poweroff as a last resort. * @map_event: Driver callback to map IRQ status into regulator devices with * events / errors. NOTE: callback MUST initialize both the * errors and notifs for all rdevs which it signals having diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h index 6c9f19a33865..ce3c58286062 100644 --- a/include/linux/sched/cputime.h +++ b/include/linux/sched/cputime.h @@ -18,15 +18,16 @@ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void task_cputime(struct task_struct *t, +extern bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime); extern u64 task_gtime(struct task_struct *t); #else -static inline void task_cputime(struct task_struct *t, +static inline bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { *utime = t->utime; *stime = t->stime; + return false; } static inline u64 task_gtime(struct task_struct *t) diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h index 0fdbe1ddd8d1..b97912fdbae7 100644 --- a/include/linux/seq_file_net.h +++ b/include/linux/seq_file_net.h @@ -9,7 +9,8 @@ extern struct net init_net; struct seq_net_private { #ifdef CONFIG_NET_NS - struct net *net; + struct net *net; + netns_tracker ns_tracker; #endif }; diff --git a/include/linux/siphash.h b/include/linux/siphash.h index 3f7427b9e935..cce8a9acc76c 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -29,9 +29,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key) } u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); -#endif u64 siphash_1u64(const u64 a, const siphash_key_t *key); u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); @@ -84,10 +82,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len, static inline u64 siphash(const void *data, size_t len, const siphash_key_t *key) { -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + 
!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) return __siphash_unaligned(data, len, key); -#endif return ___siphash_aligned(data, len, key); } @@ -98,10 +95,8 @@ typedef struct { u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key); -#endif u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); @@ -137,10 +132,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, static inline u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key) { -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) return __hsiphash_unaligned(data, len, key); -#endif return ___hsiphash_aligned(data, len, key); } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index eae4bd3237a4..6535294f6a48 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -292,9 +292,11 @@ struct tc_skb_ext { #endif struct sk_buff_head { - /* These two members must be first. */ - struct sk_buff *next; - struct sk_buff *prev; + /* These two members must be first to match sk_buff. */ + struct_group_tagged(sk_buff_list, list, + struct sk_buff *next; + struct sk_buff *prev; + ); __u32 qlen; spinlock_t lock; @@ -730,7 +732,7 @@ typedef unsigned char *sk_buff_data_t; struct sk_buff { union { struct { - /* These two members must be first. */ + /* These two members must be first to match sk_buff_head. */ struct sk_buff *next; struct sk_buff *prev; @@ -1976,8 +1978,8 @@ static inline void __skb_insert(struct sk_buff *newsk, */ WRITE_ONCE(newsk->next, next); WRITE_ONCE(newsk->prev, prev); - WRITE_ONCE(next->prev, newsk); - WRITE_ONCE(prev->next, newsk); + WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk); + WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk); WRITE_ONCE(list->qlen, list->qlen + 1); } @@ -2073,7 +2075,7 @@ static inline void __skb_queue_after(struct sk_buff_head *list, struct sk_buff *prev, struct sk_buff *newsk) { - __skb_insert(newsk, prev, prev->next, list); + __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list); } void skb_append(struct sk_buff *old, struct sk_buff *newsk, @@ -2083,7 +2085,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list, struct sk_buff *next, struct sk_buff *newsk) { - __skb_insert(newsk, next->prev, next, list); + __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list); } /** @@ -3486,7 +3488,8 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = ~csum_partial(start, len, ~skb->csum); + skb->csum = wsum_negate(csum_partial(start, len, + wsum_negate(skb->csum))); else if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_start_offset(skb) < 0) skb->ip_summed = CHECKSUM_NONE; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 48d8a363319e..78b91bb92f0d 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -512,11 +512,13 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, int shiftlen); +void __tcp_sock_set_cork(struct sock *sk, bool on); void tcp_sock_set_cork(struct sock *sk, bool on); 
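/* Hedged usage sketch (caller code, not part of this patch): the new
 * double-underscore variants assume the caller already holds the socket
 * lock, e.g.:
 *
 *	lock_sock(sk);
 *	__tcp_sock_set_cork(sk, true);
 *	... queue pending writes ...
 *	__tcp_sock_set_cork(sk, false);
 *	release_sock(sk);
 */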
int tcp_sock_set_keepcnt(struct sock *sk, int val); int tcp_sock_set_keepidle_locked(struct sock *sk, int val); int tcp_sock_set_keepidle(struct sock *sk, int val); int tcp_sock_set_keepintvl(struct sock *sk, int val); +void __tcp_sock_set_nodelay(struct sock *sk, bool on); void tcp_sock_set_nodelay(struct sock *sk); void tcp_sock_set_quickack(struct sock *sk, int val); int tcp_sock_set_syncnt(struct sock *sk, int val); diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index e8ec116c916b..6ad4e9032d53 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -66,7 +66,7 @@ #include <linux/seqlock.h> struct u64_stats_sync { -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) seqcount_t seq; #endif }; @@ -125,7 +125,7 @@ static inline void u64_stats_inc(u64_stats_t *p) } #endif -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) #define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) #else static inline void u64_stats_init(struct u64_stats_sync *syncp) @@ -135,15 +135,19 @@ static inline void u64_stats_init(struct u64_stats_sync *syncp) static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); write_seqcount_begin(&syncp->seq); #endif } static inline void u64_stats_update_end(struct u64_stats_sync *syncp) { -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) write_seqcount_end(&syncp->seq); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); #endif } @@ -152,8 +156,11 @@ u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) { unsigned long flags = 0; -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) - local_irq_save(flags); +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); + else + local_irq_save(flags); write_seqcount_begin(&syncp->seq); #endif return flags; @@ -163,15 +170,18 @@ static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, unsigned long flags) { -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) write_seqcount_end(&syncp->seq); - local_irq_restore(flags); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); + else + local_irq_restore(flags); #endif } static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) return read_seqcount_begin(&syncp->seq); #else return 0; @@ -180,7 +190,7 @@ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync * static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { -#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)) preempt_disable(); #endif return __u64_stats_fetch_begin(syncp); @@ -189,7 +199,7 @@ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *sy static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { -#if BITS_PER_LONG==32 
&& defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) return read_seqcount_retry(&syncp->seq, start); #else return false; @@ -199,7 +209,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { -#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)) preempt_enable(); #endif return __u64_stats_fetch_retry(syncp, start); @@ -213,7 +223,9 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, */ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) { -#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT) + preempt_disable(); +#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP) local_irq_disable(); #endif return __u64_stats_fetch_begin(syncp); @@ -222,7 +234,9 @@ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, unsigned int start) { -#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) +#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT) + preempt_enable(); +#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP) local_irq_enable(); #endif return __u64_stats_fetch_retry(syncp, start); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 44d0e09da2d9..41edbc01ffa4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -152,7 +152,6 @@ size_t virtio_max_dma_size(struct virtio_device *vdev); * @feature_table_size: number of entries in the feature table array. * @feature_table_legacy: same as feature_table but when working in legacy mode. * @feature_table_size_legacy: number of entries in feature table legacy array. - * @suppress_used_validation: set to not have core validate used length * @probe: the function to call when a device is found. Returns 0 or -errno. * @scan: optional function to call after successful probe; intended * for virtio-scsi to invoke a scan. 
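For the u64_stats_sync changes above, a minimal sketch of the writer/reader
pattern these helpers serialize (the stats struct and field names are
hypothetical):

	struct pcpu_stats {
		u64 rx_bytes;
		struct u64_stats_sync syncp;
	};

	/* writer, e.g. the driver's RX path */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);

	/* reader, e.g. ndo_get_stats64() */
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

On 64-bit kernels these helpers compile down to plain loads and stores; the
seqcount (and, with PREEMPT_RT, the preempt_disable()/preempt_enable() pair
added here) exists only on 32-bit.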
@@ -169,7 +168,6 @@ struct virtio_driver { unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; - bool suppress_used_validation; int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); diff --git a/include/linux/wwan.h b/include/linux/wwan.h index 1646aa3e6779..e143c88bf4b0 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -171,6 +171,13 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops, void wwan_unregister_ops(struct device *parent); +#ifdef CONFIG_WWAN_DEBUGFS struct dentry *wwan_get_debugfs_dir(struct device *parent); +#else +static inline struct dentry *wwan_get_debugfs_dir(struct device *parent) +{ + return ERR_PTR(-ENODEV); +} +#endif #endif /* __WWAN_H */ diff --git a/include/net/ax25.h b/include/net/ax25.h index 03d409de61ad..526e49589197 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -229,7 +229,10 @@ struct ctl_table; typedef struct ax25_dev { struct ax25_dev *next; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net_device *forward; struct ctl_table_header *sysheader; int values[AX25_MAX_VALUES]; diff --git a/include/net/bareudp.h b/include/net/bareudp.h index dc65a0d71d9b..17610c8d6361 100644 --- a/include/net/bareudp.h +++ b/include/net/bareudp.h @@ -3,21 +3,10 @@ #ifndef __NET_BAREUDP_H #define __NET_BAREUDP_H +#include <linux/netdevice.h> #include <linux/types.h> -#include <linux/skbuff.h> #include <net/rtnetlink.h> -struct bareudp_conf { - __be16 ethertype; - __be16 port; - u16 sport_min; - bool multi_proto_mode; -}; - -struct net_device *bareudp_dev_create(struct net *net, const char *name, - u8 name_assign_type, - struct bareudp_conf *info); - static inline bool netif_is_bareudp(const struct net_device *dev) { return dev->rtnl_link_ops && diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h index f6af76c87a6c..191c36afa1f4 100644 --- a/include/net/bond_alb.h +++ b/include/net/bond_alb.h @@ -126,7 +126,7 @@ struct tlb_slave_info { struct alb_bond_info { struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ u32 unbalanced_load; - int tx_rebalance_counter; + atomic_t tx_rebalance_counter; int lp_counter; /* -------- rlb parameters -------- */ int rlb_enabled; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 4202c609bb0b..c4898fcbf923 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -133,6 +133,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id)) WRITE_ONCE(sk->sk_napi_id, skb->napi_id); #endif + sk_rx_queue_update(sk, skb); +} + +/* Variant of sk_mark_napi_id() for passive flow setup, + * as sk->sk_napi_id and sk->sk_rx_queue_mapping content + * needs to be set. 
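+ *
+ * A hedged sketch of the intended call site (names abridged): when the
+ * passive setup path creates a full socket from the skb that completed
+ * the handshake, it calls
+ *
+ *	sk_mark_napi_id_set(sk, skb);
+ *
+ * which unconditionally records both skb->napi_id and the skb's receive
+ * queue on the socket.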
+ */ +static inline void sk_mark_napi_id_set(struct sock *sk, + const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + WRITE_ONCE(sk->sk_napi_id, skb->napi_id); +#endif sk_rx_queue_set(sk, skb); } diff --git a/include/net/checksum.h b/include/net/checksum.h index 5b96d5bd6e54..5218041e5c8f 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -180,4 +180,8 @@ static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } +static inline __wsum wsum_negate(__wsum val) +{ + return (__force __wsum)-((__force u32)val); +} #endif diff --git a/include/net/devlink.h b/include/net/devlink.h index 043fcec8b0aa..3276a29f2b81 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -664,13 +664,17 @@ struct devlink_health_reporter_ops { * @trap_name: Trap name. * @trap_group_name: Trap group name. * @input_dev: Input netdevice. + * @dev_tracker: refcount tracker for @input_dev. * @fa_cookie: Flow action user cookie. * @trap_type: Trap type. */ struct devlink_trap_metadata { const char *trap_name; const char *trap_group_name; + struct net_device *input_dev; + netdevice_tracker dev_tracker; + const struct flow_action_cookie *fa_cookie; enum devlink_trap_type trap_type; }; diff --git a/include/net/dsa.h b/include/net/dsa.h index eff5c44ba377..f16959444ae1 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -88,6 +88,8 @@ struct dsa_device_ops { struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, int *offset); + int (*connect)(struct dsa_switch *ds); + void (*disconnect)(struct dsa_switch *ds); unsigned int needed_headroom; unsigned int needed_tailroom; const char *name; @@ -219,6 +221,12 @@ struct dsa_mall_tc_entry { }; }; +struct dsa_bridge { + struct net_device *dev; + unsigned int num; + bool tx_fwd_offload; + refcount_t refcount; +}; struct dsa_port { /* A CPU port is physically connected to a master device. @@ -256,8 +264,7 @@ struct dsa_port { /* Managed by DSA on user ports and by drivers on CPU and DSA ports */ bool learning; u8 stp_state; - struct net_device *bridge_dev; - int bridge_num; + struct dsa_bridge *bridge; struct devlink_port devlink_port; bool devlink_port_setup; struct phylink *pl; @@ -269,12 +276,6 @@ struct dsa_port { struct list_head list; /* - * Give the switch driver somewhere to hang its per-port private data - * structures (accessible from the tagger). - */ - void *priv; - - /* * Original copy of the master netdev ethtool_ops */ const struct ethtool_ops *orig_ethtool_ops; @@ -332,6 +333,8 @@ struct dsa_switch { */ void *priv; + void *tagger_data; + /* * Configuration data for this switch. */ @@ -413,12 +416,12 @@ struct dsa_switch { */ unsigned int num_lag_ids; - /* Drivers that support bridge forwarding offload should set this to - * the maximum number of bridges spanning the same switch tree (or all - * trees, in the case of cross-tree bridging support) that can be - * offloaded. + /* Drivers that support bridge forwarding offload or FDB isolation + * should set this to the maximum number of bridges spanning the same + * switch tree (or all trees, in the case of cross-tree bridging + * support) that can be offloaded. 
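+ *
+ * A hedged example (the value is illustrative): a switch whose tagger can
+ * address eight distinct bridges would set, in its setup method,
+ *
+ *	ds->max_num_bridges = 8;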
*/ - unsigned int num_fwd_offloading_bridges; + unsigned int max_num_bridges; size_t num_ports; }; @@ -588,7 +591,7 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp) static inline struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) { - if (!dp->bridge_dev) + if (!dp->bridge) return NULL; if (dp->lag_dev) @@ -599,6 +602,76 @@ struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) return dp->slave; } +static inline struct net_device * +dsa_port_bridge_dev_get(const struct dsa_port *dp) +{ + return dp->bridge ? dp->bridge->dev : NULL; +} + +static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp) +{ + return dp->bridge ? dp->bridge->num : 0; +} + +static inline bool dsa_port_bridge_same(const struct dsa_port *a, + const struct dsa_port *b) +{ + struct net_device *br_a = dsa_port_bridge_dev_get(a); + struct net_device *br_b = dsa_port_bridge_dev_get(b); + + /* Standalone ports are not in the same bridge with one another */ + return (!br_a || !br_b) ? false : (br_a == br_b); +} + +static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp, + const struct net_device *dev) +{ + return dsa_port_to_bridge_port(dp) == dev; +} + +static inline bool +dsa_port_offloads_bridge_dev(struct dsa_port *dp, + const struct net_device *bridge_dev) +{ + /* DSA ports connected to a bridge, and event was emitted + * for the bridge. + */ + return dsa_port_bridge_dev_get(dp) == bridge_dev; +} + +static inline bool dsa_port_offloads_bridge(struct dsa_port *dp, + const struct dsa_bridge *bridge) +{ + return dsa_port_bridge_dev_get(dp) == bridge->dev; +} + +/* Returns true if any port of this tree offloads the given net_device */ +static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst, + const struct net_device *dev) +{ + struct dsa_port *dp; + + list_for_each_entry(dp, &dst->ports, list) + if (dsa_port_offloads_bridge_port(dp, dev)) + return true; + + return false; +} + +/* Returns true if any port of this tree offloads the given bridge */ +static inline bool +dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst, + const struct net_device *bridge_dev) +{ + struct dsa_port *dp; + + list_for_each_entry(dp, &dst->ports, list) + if (dsa_port_offloads_bridge_dev(dp, bridge_dev)) + return true; + + return false; +} + typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid, bool is_static, void *data); struct dsa_switch_ops { @@ -614,6 +687,13 @@ struct dsa_switch_ops { enum dsa_tag_protocol mprot); int (*change_tag_protocol)(struct dsa_switch *ds, int port, enum dsa_tag_protocol proto); + /* + * Method for switch drivers to connect to the tagging protocol driver + * in current use. The switch driver can provide handlers for certain + * types of packets for switch management. 
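+ *
+ * A hedged sketch of an implementation (all FOO names are hypothetical):
+ *
+ *	static int foo_connect_tag_protocol(struct dsa_switch *ds,
+ *					    enum dsa_tag_protocol proto)
+ *	{
+ *		if (proto != DSA_TAG_PROTO_FOO)
+ *			return -EPROTONOSUPPORT;
+ *
+ *		foo_tagger_data(ds)->xmit_work_fn = foo_deferred_xmit;
+ *		return 0;
+ *	}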
+ */ + int (*connect_tag_protocol)(struct dsa_switch *ds, + enum dsa_tag_protocol proto); /* Optional switch-wide initialization and destruction methods */ int (*setup)(struct dsa_switch *ds); @@ -645,8 +725,8 @@ struct dsa_switch_ops { /* * PHYLINK integration */ - void (*phylink_get_interfaces)(struct dsa_switch *ds, int port, - unsigned long *supported_interfaces); + void (*phylink_get_caps)(struct dsa_switch *ds, int port, + struct phylink_config *config); void (*phylink_validate)(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state); @@ -748,17 +828,10 @@ struct dsa_switch_ops { */ int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs); int (*port_bridge_join)(struct dsa_switch *ds, int port, - struct net_device *bridge); + struct dsa_bridge bridge, + bool *tx_fwd_offload); void (*port_bridge_leave)(struct dsa_switch *ds, int port, - struct net_device *bridge); - /* Called right after .port_bridge_join() */ - int (*port_bridge_tx_fwd_offload)(struct dsa_switch *ds, int port, - struct net_device *bridge, - int bridge_num); - /* Called right before .port_bridge_leave() */ - void (*port_bridge_tx_fwd_unoffload)(struct dsa_switch *ds, int port, - struct net_device *bridge, - int bridge_num); + struct dsa_bridge bridge); void (*port_stp_state_set)(struct dsa_switch *ds, int port, u8 state); void (*port_fast_age)(struct dsa_switch *ds, int port); @@ -830,10 +903,10 @@ struct dsa_switch_ops { */ int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index, int sw_index, int port, - struct net_device *br); + struct dsa_bridge bridge); void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index, int sw_index, int port, - struct net_device *br); + struct dsa_bridge bridge); int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index, int port); int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index, diff --git a/include/net/dst.h b/include/net/dst.h index a057319aabef..6aa252c3fc55 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -77,6 +77,7 @@ struct dst_entry { #ifndef CONFIG_64BIT atomic_t __refcnt; /* 32-bit offset 64 */ #endif + netdevice_tracker dev_tracker; }; struct dst_metrics { diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h index 67634675e919..df6622a5fe98 100644 --- a/include/net/dst_cache.h +++ b/include/net/dst_cache.h @@ -80,6 +80,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache) } /** + * dst_cache_reset_now - invalidate the cache contents immediately + * @dst_cache: the cache + * + * The caller must be sure there are no concurrent users, as this frees + * all dst_cache users immediately, rather than waiting for the next + * per-cpu usage like dst_cache_reset does. Most callers should use the + * higher speed lazily-freed dst_cache_reset function instead. 
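+ *
+ * A hedged example (the tunnel struct is hypothetical): a device changing
+ * its encapsulation parameters, with writers already excluded, would call
+ *
+ *	dst_cache_reset_now(&tunnel->dst_cache);
+ *
+ * so stale cached routes are released before the new settings apply.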
+ */ +void dst_cache_reset_now(struct dst_cache *dst_cache); + +/** * dst_cache_init - initialize the cache, allocating the required storage * @dst_cache: the cache * @gfp: allocation flags diff --git a/include/net/failover.h b/include/net/failover.h index bb15438f39c7..f2b42b4b9cd6 100644 --- a/include/net/failover.h +++ b/include/net/failover.h @@ -25,6 +25,7 @@ struct failover_ops { struct failover { struct list_head list; struct net_device __rcu *failover_dev; + netdevice_tracker dev_tracker; struct failover_ops __rcu *ops; }; diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 4b10676c69d1..bd07484ab9dd 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -69,7 +69,7 @@ struct fib_rules_ops { int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); - bool (*suppress)(struct fib_rule *, + bool (*suppress)(struct fib_rule *, int, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); @@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule, struct fib_lookup_arg *arg)); INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg)); INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg)); #endif diff --git a/include/net/gro.h b/include/net/gro.h index b1139fca7c43..8f75802d50fd 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -173,8 +173,8 @@ static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { if (NAPI_GRO_CB(skb)->csum_valid) - NAPI_GRO_CB(skb)->csum = ~csum_partial(start, len, - ~NAPI_GRO_CB(skb)->csum); + NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len, + wsum_negate(NAPI_GRO_CB(skb)->csum))); } /* GRO checksum functions. 
These are logical equivalents of the normal diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 653e7d0f65cb..f026cf08a8e8 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -160,6 +160,7 @@ struct ipv6_devstat { struct inet6_dev { struct net_device *dev; + netdevice_tracker dev_tracker; struct list_head addr_list; diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 028eaea1c854..a38c4f1e4e5c 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -46,6 +46,7 @@ struct __ip6_tnl_parm { struct ip6_tnl { struct ip6_tnl __rcu *next; /* next tunnel in list */ struct net_device *dev; /* virtual device associated with tunnel */ + netdevice_tracker dev_tracker; struct net *net; /* netns for packet i/o */ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ab5348e57db1..c4297704bbcb 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -79,6 +79,7 @@ struct fnhe_hash_bucket { struct fib_nh_common { struct net_device *nhc_dev; + netdevice_tracker nhc_dev_tracker; int nhc_oif; unsigned char nhc_scope; u8 nhc_family; @@ -111,6 +112,7 @@ struct fib_nh { int nh_saddr_genid; #define fib_nh_family nh_common.nhc_family #define fib_nh_dev nh_common.nhc_dev +#define fib_nh_dev_tracker nh_common.nhc_dev_tracker #define fib_nh_oif nh_common.nhc_oif #define fib_nh_flags nh_common.nhc_flags #define fib_nh_lws nh_common.nhc_lwtstate @@ -438,7 +440,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { - return net->ipv4.fib_num_tclassid_users; + return atomic_read(&net->ipv4.fib_num_tclassid_users); } #else static inline int fib_num_tclassid_users(struct net *net) diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index bc3b13ec93c9..0219fe907b26 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -104,7 +104,10 @@ struct metadata_dst; struct ip_tunnel { struct ip_tunnel __rcu *next; struct hlist_node hash_node; + struct net_device *dev; + netdevice_tracker dev_tracker; + struct net *net; /* netns for packet i/o */ unsigned long err_time; /* Time when the last ICMP error diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index ea985aa7a6c5..2c1ea3414640 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -38,6 +38,7 @@ struct llc_sock { struct llc_addr laddr; /* lsap/mac pair */ struct llc_addr daddr; /* dsap/mac pair */ struct net_device *dev; /* device to send to remote */ + netdevice_tracker dev_tracker; u32 copied_seq; /* head of yet unread data */ u8 retry_count; /* number of retries */ u8 ack_must_be_send; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 55af812c3ed5..937389e04c8e 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -70,6 +70,7 @@ enum { struct neigh_parms { possible_net_t net; struct net_device *dev; + netdevice_tracker dev_tracker; struct list_head list; int (*neigh_setup)(struct neighbour *); struct neigh_table *tbl; @@ -158,6 +159,7 @@ struct neighbour { struct list_head managed_list; struct rcu_head rcu; struct net_device *dev; + netdevice_tracker dev_tracker; u8 primary_key[0]; } __randomize_layout; @@ -173,6 +175,7 @@ struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; + netdevice_tracker dev_tracker; u32 flags; u8 protocol; u8 key[]; diff 
--git a/include/net/net_namespace.h b/include/net/net_namespace.h index bb5fa5914032..5b61c462e534 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -34,6 +34,7 @@ #include <net/netns/smc.h> #include <net/netns/bpf.h> #include <net/netns/mctp.h> +#include <net/net_trackers.h> #include <linux/ns_common.h> #include <linux/idr.h> #include <linux/skbuff.h> @@ -87,6 +88,7 @@ struct net { struct idr netns_ids; struct ns_common ns; + struct ref_tracker_dir refcnt_tracker; struct list_head dev_base_head; struct proc_dir_entry *proc_net; @@ -240,6 +242,7 @@ void ipx_unregister_sysctl(void); #ifdef CONFIG_NET_NS void __put_net(struct net *net); +/* Try using get_net_track() instead */ static inline struct net *get_net(struct net *net) { refcount_inc(&net->ns.count); @@ -258,6 +261,7 @@ static inline struct net *maybe_get_net(struct net *net) return net; } +/* Try using put_net_track() instead */ static inline void put_net(struct net *net) { if (refcount_dec_and_test(&net->ns.count)) @@ -308,6 +312,36 @@ static inline int check_net(const struct net *net) #endif +static inline void netns_tracker_alloc(struct net *net, + netns_tracker *tracker, gfp_t gfp) +{ +#ifdef CONFIG_NET_NS_REFCNT_TRACKER + ref_tracker_alloc(&net->refcnt_tracker, tracker, gfp); +#endif +} + +static inline void netns_tracker_free(struct net *net, + netns_tracker *tracker) +{ +#ifdef CONFIG_NET_NS_REFCNT_TRACKER + ref_tracker_free(&net->refcnt_tracker, tracker); +#endif +} + +static inline struct net *get_net_track(struct net *net, + netns_tracker *tracker, gfp_t gfp) +{ + get_net(net); + netns_tracker_alloc(net, tracker, gfp); + return net; +} + +static inline void put_net_track(struct net *net, netns_tracker *tracker) +{ + netns_tracker_free(net, tracker); + put_net(net); +} + typedef struct { #ifdef CONFIG_NET_NS struct net *net; diff --git a/include/net/net_trackers.h b/include/net/net_trackers.h new file mode 100644 index 000000000000..d94c76cf15a9 --- /dev/null +++ b/include/net/net_trackers.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NET_NET_TRACKERS_H +#define __NET_NET_TRACKERS_H +#include <linux/ref_tracker.h> + +#ifdef CONFIG_NET_DEV_REFCNT_TRACKER +typedef struct ref_tracker *netdevice_tracker; +#else +typedef struct {} netdevice_tracker; +#endif + +#ifdef CONFIG_NET_NS_REFCNT_TRACKER +typedef struct ref_tracker *netns_tracker; +#else +typedef struct {} netns_tracker; +#endif + +#endif /* __NET_NET_TRACKERS_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index cc663c68ddc4..d24b0a34c8f0 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb) /* jiffies until ct expires, 0 if already expired */ static inline unsigned long nf_ct_expires(const struct nf_conn *ct) { - s32 timeout = ct->timeout - nfct_time_stamp; + s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp; return timeout > 0 ? 
timeout : 0; } static inline bool nf_ct_is_expired(const struct nf_conn *ct) { - return (__s32)(ct->timeout - nfct_time_stamp) <= 0; + return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0; } /* use after obtaining a reference count */ @@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct) static inline void nf_ct_offload_timeout(struct nf_conn *ct) { if (nf_ct_expires(ct) < NF_CT_DAY / 2) - ct->timeout = nfct_time_stamp + NF_CT_DAY; + WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY); } struct kernel_param; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2f65701a43c9..6c5b2efc4f17 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -65,7 +65,7 @@ struct netns_ipv4 { bool fib_has_custom_local_routes; bool fib_offload_disabled; #ifdef CONFIG_IP_ROUTE_CLASSID - int fib_num_tclassid_users; + atomic_t fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; struct sock *fibnl; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 193f88ebf629..cebc1bd713b6 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -202,7 +202,8 @@ struct tcf_exts { __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ int nr_actions; struct tc_action **actions; - struct net *net; + struct net *net; + netns_tracker ns_tracker; #endif /* Map to export classifier specific extension TLV types to the * generic extensions API. Unsupported extensions must be set to 0. @@ -218,6 +219,7 @@ static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net, exts->type = 0; exts->nr_actions = 0; exts->net = net; + netns_tracker_alloc(net, &exts->ns_tracker, GFP_KERNEL); exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), GFP_KERNEL); if (!exts->actions) @@ -236,6 +238,8 @@ static inline bool tcf_exts_get_net(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT exts->net = maybe_get_net(exts->net); + if (exts->net) + netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL); return exts->net != NULL; #else return true; @@ -246,7 +250,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT if (exts->net) - put_net(exts->net); + put_net_track(exts->net, &exts->ns_tracker); #endif } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 22179b2fda72..dbf202bb1a0e 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -125,7 +125,7 @@ struct Qdisc { spinlock_t seqlock; struct rcu_head rcu; - + netdevice_tracker dev_tracker; /* private data */ long privdata[] ____cacheline_aligned; }; diff --git a/include/net/sock.h b/include/net/sock.h index e7c231373875..37f878564d25 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -350,6 +350,7 @@ struct bpf_local_storage; * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME * @sk_txtime_report_errors: set report errors mode for SO_TXTIME * @sk_txtime_unused: unused txtime flags + * @ns_tracker: tracker for netns reference */ struct sock { /* @@ -538,6 +539,7 @@ struct sock { struct bpf_local_storage __rcu *sk_bpf_storage; #endif struct rcu_head sk_rcu; + netns_tracker ns_tracker; }; enum sk_pacing { @@ -1633,16 +1635,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK); } -static inline void sock_release_ownership(struct sock *sk) -{ - if (sk->sk_lock.owned) { - sk->sk_lock.owned = 0; - - /* The sk_lock has mutex_unlock() semantics: */ - mutex_release(&sk->sk_lock.dep_map, _RET_IP_); - } -} - /* * Macro so as to not 
evaluate some arguments when * lockdep is not enabled. @@ -1769,12 +1761,23 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk) return sk->sk_lock.owned; } +static inline void sock_release_ownership(struct sock *sk) +{ + if (sock_owned_by_user_nocheck(sk)) { + sk->sk_lock.owned = 0; + + /* The sk_lock has mutex_unlock() semantics: */ + mutex_release(&sk->sk_lock.dep_map, _RET_IP_); + } +} + /* no reclassification while locks are held */ static inline bool sock_allow_reclassification(const struct sock *csk) { struct sock *sk = (struct sock *)csk; - return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock); + return !sock_owned_by_user_nocheck(sk) && + !spin_is_locked(&sk->sk_lock.slock); } struct sock *sk_alloc(struct net *net, int family, gfp_t priority, @@ -1943,18 +1946,31 @@ static inline int sk_tx_queue_get(const struct sock *sk) return -1; } -static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +static inline void __sk_rx_queue_set(struct sock *sk, + const struct sk_buff *skb, + bool force_set) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb); - if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) + if (force_set || + unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } +static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, true); +} + +static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, false); +} + static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING @@ -2457,19 +2473,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) * @sk: socket * * Use the per task page_frag instead of the per socket one for - * optimization when we know that we're in the normal context and owns + * optimization when we know that we're in process context and own * everything that's associated with %current. * - * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest - * inside other socket operations and end up recursing into sk_page_frag() - * while it's already in use. + * Both direct reclaim and page faults can nest inside other + * socket operations and end up recursing into sk_page_frag() + * while it's already in use: explicitly avoid task page_frag + * usage if the caller is potentially doing any of them. + * This assumes that page fault handlers use the GFP_NOFS flags. * * Return: a per task page_frag if context allows that, * otherwise a per socket one. 
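 *
 * Worked example (an editorial sketch, not part of this patch): GFP_KERNEL,
 * i.e. __GFP_RECLAIM | __GFP_IO | __GFP_FS, passes the test below since it
 * sets both __GFP_DIRECT_RECLAIM and __GFP_FS and clears __GFP_MEMALLOC,
 * so it keeps the per task page_frag; GFP_ATOMIC (no __GFP_DIRECT_RECLAIM)
 * and GFP_NOFS (no __GFP_FS) allocations fall back to the per socket one.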
*/ static inline struct page_frag *sk_page_frag(struct sock *sk) { - if (gfpflags_normal_context(sk->sk_allocation)) + if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) == + (__GFP_DIRECT_RECLAIM | __GFP_FS)) return &current->task_frag; return &sk->sk_frag; diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index 1cace4c69e44..32ce8ea36950 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -10,6 +10,7 @@ struct tcf_mirred { int tcfm_eaction; bool tcfm_mac_header_xmit; struct net_device __rcu *tcfm_dev; + netdevice_tracker tcfm_dev_tracker; struct list_head tcfm_list; }; #define to_mirred(a) ((struct tcf_mirred *)a) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 2308210793a0..83b46da8873d 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -128,6 +128,7 @@ struct xfrm_state_walk { struct xfrm_state_offload { struct net_device *dev; + netdevice_tracker dev_tracker; struct net_device *real_dev; unsigned long offload_handle; unsigned int num_exthdrs; @@ -1913,7 +1914,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) if (dev->xfrmdev_ops->xdo_dev_state_free) dev->xfrmdev_ops->xdo_dev_state_free(x); xso->dev = NULL; - dev_put(dev); + dev_put_track(dev, &xso->dev_tracker); } } #else diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 33f2e8c9e88b..3e9454b00562 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -118,6 +118,7 @@ enum ocelot_target { S2, HSIO, PTP, + FDMA, GCB, DEV_GMII, TARGET_MAX, @@ -732,6 +733,8 @@ struct ocelot { /* Protects the PTP clock */ spinlock_t ptp_clock_lock; struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM]; + + struct ocelot_fdma *fdma; }; struct ocelot_policer { @@ -794,8 +797,11 @@ void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target, bool ocelot_can_inject(struct ocelot *ocelot, int grp); void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, u32 rew_op, struct sk_buff *skb); +void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag); int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb); void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp); +void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb, + u64 timestamp); /* Hardware initialization */ int ocelot_regfields_init(struct ocelot *ocelot, diff --git a/include/soc/mscc/vsc7514_regs.h b/include/soc/mscc/vsc7514_regs.h new file mode 100644 index 000000000000..ceee26c96959 --- /dev/null +++ b/include/soc/mscc/vsc7514_regs.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2021 Innovative Advantage Inc.
+ */ + +#ifndef VSC7514_REGS_H +#define VSC7514_REGS_H + +#include <soc/mscc/ocelot_vcap.h> + +extern const u32 vsc7514_ana_regmap[]; +extern const u32 vsc7514_qs_regmap[]; +extern const u32 vsc7514_qsys_regmap[]; +extern const u32 vsc7514_rew_regmap[]; +extern const u32 vsc7514_sys_regmap[]; +extern const u32 vsc7514_vcap_regmap[]; +extern const u32 vsc7514_ptp_regmap[]; +extern const u32 vsc7514_dev_gmii_regmap[]; + +extern const struct vcap_field vsc7514_vcap_es0_keys[]; +extern const struct vcap_field vsc7514_vcap_es0_actions[]; +extern const struct vcap_field vsc7514_vcap_is1_keys[]; +extern const struct vcap_field vsc7514_vcap_is1_actions[]; +extern const struct vcap_field vsc7514_vcap_is2_keys[]; +extern const struct vcap_field vsc7514_vcap_is2_actions[]; + +#endif diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 31f4c4f9aeea..ac0893df9c76 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr { */ /* Descriptor for SST ASoC machine driver */ struct snd_soc_acpi_mach { - const u8 id[ACPI_ID_LEN]; + u8 id[ACPI_ID_LEN]; const struct snd_soc_acpi_codecs *comp_ids; const u32 link_mask; const struct snd_soc_acpi_link_adr *links; diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index 3ba63319af3c..c9048f3e471b 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -8,7 +8,7 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM rpcgss -#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_RPCGSS_H #include <linux/tracepoint.h> diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index a13e20cc66b4..0512fde5e697 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -196,6 +196,13 @@ struct drm_virtgpu_context_init { __u64 ctx_set_params; }; +/* + * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in + * effect. The event size is sizeof(drm_event), since there is no additional + * payload. + */ +#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000 + #define DRM_IOCTL_VIRTGPU_MAP \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 6297eafdc40f..c26871263f1f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1342,8 +1342,10 @@ union bpf_attr { /* or valid module BTF object fd or 0 to attach to vmlinux */ __u32 attach_btf_obj_fd; }; - __u32 :32; /* pad */ + __u32 core_relo_cnt; /* number of bpf_core_relo */ __aligned_u64 fd_array; /* array of FDs */ + __aligned_u64 core_relos; + __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1744,7 +1746,7 @@ union bpf_attr { * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), - * which is currently set to 32. + * which is currently set to 33. * Return * 0 on success, or a negative error in case of failure. * @@ -4957,6 +4959,30 @@ union bpf_attr { * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. * **-EBUSY** if failed to try lock mmap_lock. * **-EINVAL** for invalid **flags**. 
+ * + * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags) + * Description + * For **nr_loops**, call **callback_fn** function + * with **callback_ctx** as the context parameter. + * The **callback_fn** should be a static function and + * the **callback_ctx** should be a pointer to the stack. + * The **flags** is used to control certain aspects of the helper. + * Currently, the **flags** must be 0. Currently, nr_loops is + * limited to 1 << 23 (~8 million) loops. + * + * long (\*callback_fn)(u32 index, void \*ctx); + * + * where **index** is the current index in the loop. The index + * is zero-indexed. + * + * If **callback_fn** returns 0, the helper will continue to the next + * loop. If return value is 1, the helper will skip the rest of + * the loops and return. Other return values are not used now, + * and will be rejected by the verifier. + * + * Return + * The number of loops performed, **-EINVAL** for invalid **flags**, + * **-E2BIG** if **nr_loops** exceeds the maximum number of loops. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5140,6 +5166,7 @@ union bpf_attr { FN(skc_to_unix_sock), \ FN(kallsyms_lookup_name), \ FN(find_vma), \ + FN(loop), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -6349,4 +6376,78 @@ enum { BTF_F_ZERO = (1ULL << 3), }; +/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value + * has to be adjusted by relocations. It is emitted by llvm and passed to + * libbpf and later to the kernel. + */ +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */ + BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ + BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ + BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ + BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ + BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */ + BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */ + BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ + BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ + BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ +}; + +/* + * "struct bpf_core_relo" is used to pass relocation data from LLVM to libbpf + * and from libbpf to the kernel. + * + * CO-RE relocation captures the following data: + * - insn_off - instruction offset (in bytes) within a BPF program that needs + * its insn->imm field to be relocated with actual field info; + * - type_id - BTF type ID of the "root" (containing) entity of a relocatable + * type or field; + * - access_str_off - offset into corresponding .BTF string section. String + * interpretation depends on specific relocation kind: + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field.
+ * - for type-based relocations, string is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; + * - kind - one of enum bpf_core_relo_kind; + * + * Example: + * struct sample { + * int a; + * struct { + * int b[10]; + * }; + * }; + * + * struct sample *s = ...; + * int *x = &s->a; // encoded as "0:0" (a is field #0) + * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, + * // b is field #0 inside anon struct, accessing elem #5) + * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) + * + * type_id for all relocs in this example will capture BTF type id of + * `struct sample`. + * + * Such relocation is emitted when using __builtin_preserve_access_index() + * Clang built-in, passing expression that captures field address, e.g.: + * + * bpf_probe_read(&dst, sizeof(dst), + * __builtin_preserve_access_index(&src->a.b.c)); + * + * In this case Clang will emit field relocation recording necessary data to + * be able to find offset of embedded `a.b.c` field within `src` struct. + * + * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction + */ +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 5da4ee234e0b..c0c2f3ed5729 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -117,7 +117,7 @@ #define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */ #define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */ -#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value +#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is more than this value * then the frame is Ethernet II. Else it is 802.3 */ /* diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index fcc61c73a666..e258e52cfd1f 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -62,7 +62,7 @@ struct so_timestamping { /** * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter * - * @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP + * @flags: one of HWTSTAMP_FLAG_* * @tx_type: one of HWTSTAMP_TX_* * @rx_filter: one of HWTSTAMP_FILTER_* * @@ -78,6 +78,20 @@ struct hwtstamp_config { int rx_filter; }; +/* possible values for hwtstamp_config->flags */ +enum hwtstamp_flags { + /* + * With this flag, the user could get bond active interface's + * PHC index. Note this PHC index is not stable as when there + * is a failover, the bond active interface will be changed, so + * will be the PHC index.
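	 *
	 * Illustrative userspace sketch (not part of this patch), assuming
	 * a bond interface named "bond0" and an open socket fd:
	 *
	 *	struct hwtstamp_config cfg = {
	 *		.flags = HWTSTAMP_FLAG_BONDED_PHC_INDEX,
	 *		.tx_type = HWTSTAMP_TX_ON,
	 *		.rx_filter = HWTSTAMP_FILTER_ALL,
	 *	};
	 *	struct ifreq ifr = {};
	 *
	 *	strncpy(ifr.ifr_name, "bond0", sizeof(ifr.ifr_name) - 1);
	 *	ifr.ifr_data = (char *)&cfg;
	 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);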
+ */ + HWTSTAMP_FLAG_BONDED_PHC_INDEX = (1<<0), + + HWTSTAMP_FLAG_LAST = HWTSTAMP_FLAG_BONDED_PHC_INDEX, + HWTSTAMP_FLAG_MASK = (HWTSTAMP_FLAG_LAST - 1) | HWTSTAMP_FLAG_LAST +}; + /* possible values for hwtstamp_config->tx_type */ enum hwtstamp_tx_types { /* diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index e32ec6932e82..904909d020e2 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -292,7 +292,6 @@ enum LINUX_MIB_TCPDSACKIGNOREDDUBIOUS, /* TCPDSACKIgnoredDubious */ LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */ LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */ - LINUX_MIB_TCPSMALLQUEUEFAILURE, /* TCPSmallQueueFailure */ __LINUX_MIB_MAX }; diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index cf6ca339f3cd..c1a9be6a4b9f 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -36,3 +36,7 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o obj-${CONFIG_BPF_LSM} += bpf_lsm.o endif obj-$(CONFIG_BPF_PRELOAD) += preload/ + +obj-$(CONFIG_BPF_SYSCALL) += relo_core.o +$(obj)/relo_core.o: $(srctree)/tools/lib/bpf/relo_core.c FORCE + $(call if_changed_rule,cc_o_c) diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index b2ee45064e06..b7aef5b3416d 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -714,3 +714,38 @@ const struct bpf_func_proto bpf_for_each_map_elem_proto = { .arg3_type = ARG_PTR_TO_STACK_OR_NULL, .arg4_type = ARG_ANYTHING, }; + +/* maximum number of loops */ +#define MAX_LOOPS BIT(23) + +BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx, + u64, flags) +{ + bpf_callback_t callback = (bpf_callback_t)callback_fn; + u64 ret; + u32 i; + + if (flags) + return -EINVAL; + if (nr_loops > MAX_LOOPS) + return -E2BIG; + + for (i = 0; i < nr_loops; i++) { + ret = callback((u64)i, (u64)(long)callback_ctx, 0, 0, 0); + /* return value: 0 - continue, 1 - stop and return */ + if (ret) + return i + 1; + } + + return i; +} + +const struct bpf_func_proto bpf_loop_proto = { + .func = bpf_loop, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, + .arg2_type = ARG_PTR_TO_FUNC, + .arg3_type = ARG_PTR_TO_STACK_OR_NULL, + .arg4_type = ARG_ANYTHING, +}; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 8ecfe4752769..21069dbe9138 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -165,7 +165,7 @@ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) break; } - if (btf_member_bitfield_size(t, member)) { + if (__btf_member_bitfield_size(t, member)) { pr_warn("bit field member %s in struct %s is not supported\n", mname, st_ops->name); break; @@ -296,7 +296,7 @@ static int check_zero_holes(const struct btf_type *t, void *data) const struct btf_type *mtype; for_each_member(i, t, member) { - moff = btf_member_bit_offset(t, member) / 8; + moff = __btf_member_bit_offset(t, member) / 8; if (moff > prev_mend && memchr_inv(data + prev_mend, 0, moff - prev_mend)) return -EINVAL; @@ -387,7 +387,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, struct bpf_prog *prog; u32 moff; - moff = btf_member_bit_offset(t, member) / 8; + moff = __btf_member_bit_offset(t, member) / 8; ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL); if (ptype == module_type) { if (*(void **)(udata + moff)) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 6b9d23be1e99..8b00c6e4d6fb 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -25,6 +25,7 @@ #include <linux/kobject.h> #include 
<linux/sysfs.h> #include <net/sock.h> +#include "../tools/lib/bpf/relo_core.h" /* BTF (BPF Type Format) is the meta data format which describes * the data types of BPF program/map. Hence, it basically focus @@ -836,7 +837,7 @@ static const char *btf_show_name(struct btf_show *show) const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)]; const char *name = NULL, *prefix = "", *parens = ""; const struct btf_member *m = show->state.member; - const struct btf_type *t = show->state.type; + const struct btf_type *t; const struct btf_array *array; u32 id = show->state.type_id; const char *member = NULL; @@ -2969,7 +2970,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, return -EINVAL; } - offset = btf_member_bit_offset(t, member); + offset = __btf_member_bit_offset(t, member); if (is_union && offset) { btf_verifier_log_member(env, t, member, "Invalid member bits_offset"); @@ -3094,7 +3095,7 @@ static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t if (off != -ENOENT) /* only one such field is allowed */ return -E2BIG; - off = btf_member_bit_offset(t, member); + off = __btf_member_bit_offset(t, member); if (off % 8) /* valid C code cannot generate such BTF */ return -EINVAL; @@ -3184,8 +3185,8 @@ static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, btf_show_start_member(show, member); - member_offset = btf_member_bit_offset(t, member); - bitfield_size = btf_member_bitfield_size(t, member); + member_offset = __btf_member_bit_offset(t, member); + bitfield_size = __btf_member_bitfield_size(t, member); bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); bits8_offset = BITS_PER_BYTE_MASKED(member_offset); if (bitfield_size) { @@ -4472,8 +4473,7 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, log->len_total = log_size; /* log attributes have to be sane */ - if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || - !log->level || !log->ubuf) { + if (!bpf_verifier_log_attr_valid(log)) { err = -EINVAL; goto errout; } @@ -5060,7 +5060,7 @@ again: if (array_elem->nelems != 0) goto error; - moff = btf_member_bit_offset(t, member) / 8; + moff = __btf_member_bit_offset(t, member) / 8; if (off < moff) goto error; @@ -5083,14 +5083,14 @@ error: for_each_member(i, t, member) { /* offset of the field in bytes */ - moff = btf_member_bit_offset(t, member) / 8; + moff = __btf_member_bit_offset(t, member) / 8; if (off + size <= moff) /* won't find anything, field is already too far */ break; - if (btf_member_bitfield_size(t, member)) { - u32 end_bit = btf_member_bit_offset(t, member) + - btf_member_bitfield_size(t, member); + if (__btf_member_bitfield_size(t, member)) { + u32 end_bit = __btf_member_bit_offset(t, member) + + __btf_member_bitfield_size(t, member); /* off <= moff instead of off == moff because clang * does not generate a BTF member for anonymous @@ -6169,6 +6169,8 @@ btf_module_read(struct file *file, struct kobject *kobj, return len; } +static void purge_cand_cache(struct btf *btf); + static int btf_module_notify(struct notifier_block *nb, unsigned long op, void *module) { @@ -6203,6 +6205,7 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, goto out; } + purge_cand_cache(NULL); mutex_lock(&btf_module_mutex); btf_mod->module = module; btf_mod->btf = btf; @@ -6245,6 +6248,7 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, list_del(&btf_mod->list); if (btf_mod->sysfs_attr) sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr); + 
purge_cand_cache(btf_mod->btf); btf_put(btf_mod->btf); kfree(btf_mod->sysfs_attr); kfree(btf_mod); @@ -6361,11 +6365,6 @@ BTF_TRACING_TYPE_xxx /* BTF ID set registration API for modules */ -struct kfunc_btf_id_list { - struct list_head list; - struct mutex mutex; -}; - #ifdef CONFIG_DEBUG_INFO_BTF_MODULES void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, @@ -6391,8 +6390,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, { struct kfunc_btf_id_set *s; - if (!owner) - return false; mutex_lock(&klist->mutex); list_for_each_entry(s, &klist->list, list) { if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) { @@ -6404,8 +6401,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, return false; } -#endif - #define DEFINE_KFUNC_BTF_ID_LIST(name) \ struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \ __MUTEX_INITIALIZER(name.mutex) }; \ @@ -6413,3 +6408,387 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list); DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list); + +#endif + +int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, + const struct btf *targ_btf, __u32 targ_id) +{ + return -EOPNOTSUPP; +} + +static bool bpf_core_is_flavor_sep(const char *s) +{ + /* check X___Y name pattern, where X and Y are not underscores */ + return s[0] != '_' && /* X */ + s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ + s[4] != '_'; /* Y */ +} + +size_t bpf_core_essential_name_len(const char *name) +{ + size_t n = strlen(name); + int i; + + for (i = n - 5; i >= 0; i--) { + if (bpf_core_is_flavor_sep(name + i)) + return i + 1; + } + return n; +} + +struct bpf_cand_cache { + const char *name; + u32 name_len; + u16 kind; + u16 cnt; + struct { + const struct btf *btf; + u32 id; + } cands[]; +}; + +static void bpf_free_cands(struct bpf_cand_cache *cands) +{ + if (!cands->cnt) + /* empty candidate array was allocated on stack */ + return; + kfree(cands); +} + +static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands) +{ + kfree(cands->name); + kfree(cands); +} + +#define VMLINUX_CAND_CACHE_SIZE 31 +static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE]; + +#define MODULE_CAND_CACHE_SIZE 31 +static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE]; + +static DEFINE_MUTEX(cand_cache_mutex); + +static void __print_cand_cache(struct bpf_verifier_log *log, + struct bpf_cand_cache **cache, + int cache_size) +{ + struct bpf_cand_cache *cc; + int i, j; + + for (i = 0; i < cache_size; i++) { + cc = cache[i]; + if (!cc) + continue; + bpf_log(log, "[%d]%s(", i, cc->name); + for (j = 0; j < cc->cnt; j++) { + bpf_log(log, "%d", cc->cands[j].id); + if (j < cc->cnt - 1) + bpf_log(log, " "); + } + bpf_log(log, "), "); + } +} + +static void print_cand_cache(struct bpf_verifier_log *log) +{ + mutex_lock(&cand_cache_mutex); + bpf_log(log, "vmlinux_cand_cache:"); + __print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); + bpf_log(log, "\nmodule_cand_cache:"); + __print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE); + bpf_log(log, "\n"); + mutex_unlock(&cand_cache_mutex); +} + +static u32 hash_cands(struct bpf_cand_cache *cands) +{ + return jhash(cands->name, cands->name_len, 0); +} + +static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands, + struct bpf_cand_cache **cache, + int cache_size) +{ + struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size]; + + 
if (cc && cc->name_len == cands->name_len && + !strncmp(cc->name, cands->name, cands->name_len)) + return cc; + return NULL; +} + +static size_t sizeof_cands(int cnt) +{ + return offsetof(struct bpf_cand_cache, cands[cnt]); +} + +static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands, + struct bpf_cand_cache **cache, + int cache_size) +{ + struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands; + + if (*cc) { + bpf_free_cands_from_cache(*cc); + *cc = NULL; + } + new_cands = kmalloc(sizeof_cands(cands->cnt), GFP_KERNEL); + if (!new_cands) { + bpf_free_cands(cands); + return ERR_PTR(-ENOMEM); + } + memcpy(new_cands, cands, sizeof_cands(cands->cnt)); + /* strdup the name, since it will stay in cache. + * the cands->name points to strings in prog's BTF and the prog can be unloaded. + */ + new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL); + bpf_free_cands(cands); + if (!new_cands->name) { + kfree(new_cands); + return ERR_PTR(-ENOMEM); + } + *cc = new_cands; + return new_cands; +} + +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES +static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache, + int cache_size) +{ + struct bpf_cand_cache *cc; + int i, j; + + for (i = 0; i < cache_size; i++) { + cc = cache[i]; + if (!cc) + continue; + if (!btf) { + /* when new module is loaded purge all of module_cand_cache, + * since new module might have candidates with the name + * that matches cached cands. + */ + bpf_free_cands_from_cache(cc); + cache[i] = NULL; + continue; + } + /* when module is unloaded purge cache entries + * that match module's btf + */ + for (j = 0; j < cc->cnt; j++) + if (cc->cands[j].btf == btf) { + bpf_free_cands_from_cache(cc); + cache[i] = NULL; + break; + } + } + +} + +static void purge_cand_cache(struct btf *btf) +{ + mutex_lock(&cand_cache_mutex); + __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE); + mutex_unlock(&cand_cache_mutex); +} +#endif + +static struct bpf_cand_cache * +bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf, + int targ_start_id) +{ + struct bpf_cand_cache *new_cands; + const struct btf_type *t; + const char *targ_name; + size_t targ_essent_len; + int n, i; + + n = btf_nr_types(targ_btf); + for (i = targ_start_id; i < n; i++) { + t = btf_type_by_id(targ_btf, i); + if (btf_kind(t) != cands->kind) + continue; + + targ_name = btf_name_by_offset(targ_btf, t->name_off); + if (!targ_name) + continue; + + /* the resched point is before strncmp to make sure that search + * for non-existing name will have a chance to schedule(). 
+ */ + cond_resched(); + + if (strncmp(cands->name, targ_name, cands->name_len) != 0) + continue; + + targ_essent_len = bpf_core_essential_name_len(targ_name); + if (targ_essent_len != cands->name_len) + continue; + + /* most of the time there is only one candidate for a given kind+name pair */ + new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL); + if (!new_cands) { + bpf_free_cands(cands); + return ERR_PTR(-ENOMEM); + } + + memcpy(new_cands, cands, sizeof_cands(cands->cnt)); + bpf_free_cands(cands); + cands = new_cands; + cands->cands[cands->cnt].btf = targ_btf; + cands->cands[cands->cnt].id = i; + cands->cnt++; + } + return cands; +} + +static struct bpf_cand_cache * +bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id) +{ + struct bpf_cand_cache *cands, *cc, local_cand = {}; + const struct btf *local_btf = ctx->btf; + const struct btf_type *local_type; + const struct btf *main_btf; + size_t local_essent_len; + struct btf *mod_btf; + const char *name; + int id; + + main_btf = bpf_get_btf_vmlinux(); + if (IS_ERR(main_btf)) + return (void *)main_btf; + + local_type = btf_type_by_id(local_btf, local_type_id); + if (!local_type) + return ERR_PTR(-EINVAL); + + name = btf_name_by_offset(local_btf, local_type->name_off); + if (str_is_empty(name)) + return ERR_PTR(-EINVAL); + local_essent_len = bpf_core_essential_name_len(name); + + cands = &local_cand; + cands->name = name; + cands->kind = btf_kind(local_type); + cands->name_len = local_essent_len; + + cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); + /* cands is a pointer to stack here */ + if (cc) { + if (cc->cnt) + return cc; + goto check_modules; + } + + /* Attempt to find target candidates in vmlinux BTF first */ + cands = bpf_core_add_cands(cands, main_btf, 1); + if (IS_ERR(cands)) + return cands; + + /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */ + + /* populate cache even when cands->cnt == 0 */ + cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE); + if (IS_ERR(cc)) + return cc; + + /* if vmlinux BTF has any candidate, don't go for module BTFs */ + if (cc->cnt) + return cc; + +check_modules: + /* cands is a pointer to stack here and cands->cnt == 0 */ + cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE); + if (cc) + /* if cache has it return it even if cc->cnt == 0 */ + return cc; + + /* If candidate is not found in vmlinux's BTF then search in module's BTFs */ + spin_lock_bh(&btf_idr_lock); + idr_for_each_entry(&btf_idr, mod_btf, id) { + if (!btf_is_module(mod_btf)) + continue; + /* linear search could be slow hence unlock/lock + * the IDR to avoid holding it for too long + */ + btf_get(mod_btf); + spin_unlock_bh(&btf_idr_lock); + cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf)); + if (IS_ERR(cands)) { + btf_put(mod_btf); + return cands; + } + spin_lock_bh(&btf_idr_lock); + btf_put(mod_btf); + } + spin_unlock_bh(&btf_idr_lock); + /* cands is a pointer to kmalloced memory here if cands->cnt > 0 + * or pointer to stack if cands->cnt == 0. + * Copy it into the cache even when cands->cnt == 0 and + * return the result.
+ */ + return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE); +} + +int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, + int relo_idx, void *insn) +{ + bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL; + struct bpf_core_cand_list cands = {}; + struct bpf_core_spec *specs; + int err; + + /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" + * into arrays of btf_ids of struct fields and array indices. + */ + specs = kcalloc(3, sizeof(*specs), GFP_KERNEL); + if (!specs) + return -ENOMEM; + + if (need_cands) { + struct bpf_cand_cache *cc; + int i; + + mutex_lock(&cand_cache_mutex); + cc = bpf_core_find_cands(ctx, relo->type_id); + if (IS_ERR(cc)) { + bpf_log(ctx->log, "target candidate search failed for %d\n", + relo->type_id); + err = PTR_ERR(cc); + goto out; + } + if (cc->cnt) { + cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL); + if (!cands.cands) { + err = -ENOMEM; + goto out; + } + } + for (i = 0; i < cc->cnt; i++) { + bpf_log(ctx->log, + "CO-RE relocating %s %s: found target candidate [%d]\n", + btf_kind_str[cc->kind], cc->name, cc->cands[i].id); + cands.cands[i].btf = cc->cands[i].btf; + cands.cands[i].id = cc->cands[i].id; + } + cands.len = cc->cnt; + /* cand_cache_mutex needs to span the cache lookup and + * copy of btf pointer into bpf_core_cand_list, + * since module can be unloaded while bpf_core_apply_relo_insn + * is working with module's btf. + */ + } + + err = bpf_core_apply_relo_insn((void *)ctx->log, insn, relo->insn_off / 8, + relo, relo_idx, ctx->btf, &cands, specs); +out: + kfree(specs); + if (need_cands) { + kfree(cands.cands); + mutex_unlock(&cand_cache_mutex); + if (ctx->log->level & BPF_LOG_LEVEL2) + print_cand_cache(ctx->log); + } + return err; +} diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 2405e39d800f..de3e5bc6781f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1574,7 +1574,8 @@ select_insn: if (unlikely(index >= array->map.max_entries)) goto out; - if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT)) + + if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT)) goto out; tail_call_cnt++; @@ -1891,7 +1892,7 @@ static void bpf_prog_select_func(struct bpf_prog *fp) /** * bpf_prog_select_runtime - select exec runtime for BPF program - * @fp: bpf_prog populated with internal BPF program + * @fp: bpf_prog populated with BPF program * @err: pointer to error variable * * Try to JIT eBPF program, if JIT is not available, use interpreter. 
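As a usage sketch for the bpf_loop() helper wired up above (illustrative only, not part of this patch; the section and symbol names are hypothetical), a BPF program supplies a static callback and an on-stack context:

/* Minimal bpf_loop() user; build with clang -target bpf. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct loop_ctx {
	long sum;
};

static long add_index(__u32 index, void *ctx)
{
	struct loop_ctx *lc = ctx;

	lc->sum += index;
	return 0;	/* 0 continues; returning 1 would stop early */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_indices(void *ctx)
{
	struct loop_ctx lc = { .sum = 0 };

	/* Runs add_index() ten times; flags must currently be 0. */
	bpf_loop(10, add_index, &lc, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";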
@@ -2300,7 +2301,6 @@ static void bpf_prog_free_deferred(struct work_struct *work) } } -/* Free internal BPF program */ void bpf_prog_free(struct bpf_prog *fp) { struct bpf_prog_aux *aux = fp->aux; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 649f07623df6..19fecfeaa9c2 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1376,6 +1376,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_ringbuf_query_proto; case BPF_FUNC_for_each_map_elem: return &bpf_for_each_map_elem_proto; + case BPF_FUNC_loop: + return &bpf_loop_proto; default: break; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1033ee8c0caf..ddd81d543203 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2198,7 +2198,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) } /* last field in 'union bpf_attr' used by this command */ -#define BPF_PROG_LOAD_LAST_FIELD fd_array +#define BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) { @@ -4819,7 +4819,7 @@ const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM, - .arg2_type = ARG_CONST_SIZE, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_LONG, }; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 88fe97d35166..b39de3ae50f5 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -293,13 +293,15 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); - n = min(log->len_total - log->len_used - 1, n); - log->kbuf[n] = '\0'; - if (log->level == BPF_LOG_KERNEL) { - pr_err("BPF:%s\n", log->kbuf); + bool newline = n > 0 && log->kbuf[n - 1] == '\n'; + + pr_err("BPF: %s%s", log->kbuf, newline ?
"" : "\n"); return; } + + n = min(log->len_total - log->len_used - 1, n); + log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else @@ -6101,6 +6103,27 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env, return 0; } +static int set_loop_callback_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx) +{ + /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, + * u64 flags); + * callback_fn(u32 index, void *callback_ctx); + */ + callee->regs[BPF_REG_1].type = SCALAR_VALUE; + callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; + + /* unused */ + __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); + __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); + __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); + + callee->in_callback_fn = true; + return 0; +} + static int set_timer_callback_state(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee, @@ -6474,13 +6497,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return err; } - if (func_id == BPF_FUNC_tail_call) { - err = check_reference_leak(env); - if (err) { - verbose(env, "tail_call would lead to reference leak\n"); - return err; - } - } else if (is_release_function(func_id)) { + if (is_release_function(func_id)) { err = release_reference(env, meta.ref_obj_id); if (err) { verbose(env, "func %s#%d reference has not been acquired before\n", @@ -6491,42 +6508,47 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn regs = cur_regs(env); - /* check that flags argument in get_local_storage(map, flags) is 0, - * this is required because get_local_storage() can't return an error. - */ - if (func_id == BPF_FUNC_get_local_storage && - !register_is_null(&regs[BPF_REG_2])) { - verbose(env, "get_local_storage() doesn't support non-zero flags\n"); - return -EINVAL; - } - - if (func_id == BPF_FUNC_for_each_map_elem) { + switch (func_id) { + case BPF_FUNC_tail_call: + err = check_reference_leak(env); + if (err) { + verbose(env, "tail_call would lead to reference leak\n"); + return err; + } + break; + case BPF_FUNC_get_local_storage: + /* check that flags argument in get_local_storage(map, flags) is 0, + * this is required because get_local_storage() can't return an error.
+ */ + if (!register_is_null(&regs[BPF_REG_2])) { + verbose(env, "get_local_storage() doesn't support non-zero flags\n"); + return -EINVAL; + } + break; + case BPF_FUNC_for_each_map_elem: err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, set_map_elem_callback_state); - if (err < 0) - return -EINVAL; - } - - if (func_id == BPF_FUNC_timer_set_callback) { + break; + case BPF_FUNC_timer_set_callback: err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, set_timer_callback_state); - if (err < 0) - return -EINVAL; - } - - if (func_id == BPF_FUNC_find_vma) { + break; + case BPF_FUNC_find_vma: err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, set_find_vma_callback_state); - if (err < 0) - return -EINVAL; - } - - if (func_id == BPF_FUNC_snprintf) { + break; + case BPF_FUNC_snprintf: err = check_bpf_snprintf_call(env, regs); - if (err < 0) - return err; + break; + case BPF_FUNC_loop: + err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, + set_loop_callback_state); + break; } + if (err) + return err; + /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); @@ -8456,7 +8478,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, new_range = dst_reg->off; if (range_right_open) - new_range--; + new_range++; /* Examples for register markings: * @@ -10267,6 +10289,78 @@ err_free: return err; } +#define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo) +#define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE + +static int check_core_relo(struct bpf_verifier_env *env, + const union bpf_attr *attr, + bpfptr_t uattr) +{ + u32 i, nr_core_relo, ncopy, expected_size, rec_size; + struct bpf_core_relo core_relo = {}; + struct bpf_prog *prog = env->prog; + const struct btf *btf = prog->aux->btf; + struct bpf_core_ctx ctx = { + .log = &env->log, + .btf = btf, + }; + bpfptr_t u_core_relo; + int err; + + nr_core_relo = attr->core_relo_cnt; + if (!nr_core_relo) + return 0; + if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo)) + return -EINVAL; + + rec_size = attr->core_relo_rec_size; + if (rec_size < MIN_CORE_RELO_SIZE || + rec_size > MAX_CORE_RELO_SIZE || + rec_size % sizeof(u32)) + return -EINVAL; + + u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); + expected_size = sizeof(struct bpf_core_relo); + ncopy = min_t(u32, expected_size, rec_size); + + /* Unlike func_info and line_info, copy and apply each CO-RE + * relocation record one at a time.
+ */ + for (i = 0; i < nr_core_relo; i++) { + /* future proofing when sizeof(bpf_core_relo) changes */ + err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size); + if (err) { + if (err == -E2BIG) { + verbose(env, "nonzero tailing record in core_relo"); + if (copy_to_bpfptr_offset(uattr, + offsetof(union bpf_attr, core_relo_rec_size), + &expected_size, sizeof(expected_size))) + err = -EFAULT; + } + break; + } + + if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) { + err = -EFAULT; + break; + } + + if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { + verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", + i, core_relo.insn_off, prog->len); + err = -EINVAL; + break; + } + + err = bpf_core_apply(&ctx, &core_relo, i, + &prog->insnsi[core_relo.insn_off / 8]); + if (err) + break; + bpfptr_add(&u_core_relo, rec_size); + } + return err; +} + static int check_btf_info(struct bpf_verifier_env *env, const union bpf_attr *attr, bpfptr_t uattr) @@ -10297,6 +10391,10 @@ static int check_btf_info(struct bpf_verifier_env *env, if (err) return err; + err = check_core_relo(env, attr, uattr); + if (err) + return err; + return 0; } @@ -13975,11 +14073,11 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; - ret = -EINVAL; /* log attributes have to be sane */ - if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || - !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) + if (!bpf_verifier_log_attr_valid(log)) { + ret = -EINVAL; goto err_unlock; + } } if (IS_ERR(btf_vmlinux)) { diff --git a/kernel/cpu.c b/kernel/cpu.c index 192e43a87407..407a2568f35e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -31,6 +31,7 @@ #include <linux/smpboot.h> #include <linux/relay.h> #include <linux/slab.h> +#include <linux/scs.h> #include <linux/percpu-rwsem.h> #include <linux/cpuset.h> @@ -588,6 +589,12 @@ static int bringup_cpu(unsigned int cpu) int ret; /* + * Reset stale stack state from the last time this CPU was online. + */ + scs_task_reset(idle); + kasan_unpoison_task_stack(idle); + + /* * Some architectures have to walk the irq descriptors to * setup the vector space for the cpu which comes online. * Prevent irq alloc/free across the bringup. diff --git a/kernel/events/core.c b/kernel/events/core.c index 523106a506ee..30d94f68c5bd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9759,6 +9759,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, continue; if (event->attr.config != entry->type) continue; + /* Cannot deliver synchronous signal to other task. */ + if (event->attr.sigtrap) + continue; if (perf_tp_event_match(event, &data, regs)) perf_swevent_event(event, count, &data, regs); } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e9db0c810554..21eccc961bba 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2086,6 +2086,9 @@ int register_kretprobe(struct kretprobe *rp) } } + if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) + return -E2BIG; + rp->kp.pre_handler = pre_handler_kretprobe; rp->kp.post_handler = NULL; diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index c51387a43265..04a74d040a6d 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -105,9 +105,9 @@ * atomic_long_cmpxchg() will be used to obtain writer lock. * * There are three places where the lock handoff bit may be set or cleared. - * 1) rwsem_mark_wake() for readers. 
- * 2) rwsem_try_write_lock() for writers. - * 3) Error path of rwsem_down_write_slowpath(). + * 1) rwsem_mark_wake() for readers -- set, clear + * 2) rwsem_try_write_lock() for writers -- set, clear + * 3) rwsem_del_waiter() -- clear * * For all the above cases, wait_lock will be held. A writer must also * be the first one in the wait_list to be eligible for setting the handoff @@ -334,6 +334,9 @@ struct rwsem_waiter { struct task_struct *task; enum rwsem_waiter_type type; unsigned long timeout; + + /* Writer only, not initialized in reader */ + bool handoff_set; }; #define rwsem_first_waiter(sem) \ list_first_entry(&sem->wait_list, struct rwsem_waiter, list) @@ -344,12 +347,6 @@ enum rwsem_wake_type { RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */ }; -enum writer_wait_state { - WRITER_NOT_FIRST, /* Writer is not first in wait list */ - WRITER_FIRST, /* Writer is first in wait list */ - WRITER_HANDOFF /* Writer is first & handoff needed */ -}; - /* * The typical HZ value is either 250 or 1000. So set the minimum waiting * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait @@ -365,6 +362,31 @@ enum writer_wait_state { */ #define MAX_READERS_WAKEUP 0x100 +static inline void +rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) +{ + lockdep_assert_held(&sem->wait_lock); + list_add_tail(&waiter->list, &sem->wait_list); + /* caller will set RWSEM_FLAG_WAITERS */ +} + +/* + * Remove a waiter from the wait_list and clear flags. + * + * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of + * this function. Modify with care. + */ +static inline void +rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) +{ + lockdep_assert_held(&sem->wait_lock); + list_del(&waiter->list); + if (likely(!list_empty(&sem->wait_list))) + return; + + atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); +} + /* * handle the lock release when processes blocked on it that can now run * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must @@ -376,6 +398,8 @@ enum writer_wait_state { * preferably when the wait_lock is released * - woken process blocks are discarded from the list after having task zeroed * - writers are only marked woken if downgrading is false + * + * Implies rwsem_del_waiter() for all woken readers. */ static void rwsem_mark_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type, @@ -490,18 +514,25 @@ static void rwsem_mark_wake(struct rw_semaphore *sem, adjustment = woken * RWSEM_READER_BIAS - adjustment; lockevent_cond_inc(rwsem_wake_reader, woken); + + oldcount = atomic_long_read(&sem->count); if (list_empty(&sem->wait_list)) { - /* hit end of list above */ + /* + * Combined with list_move_tail() above, this implies + * rwsem_del_waiter(). + */ adjustment -= RWSEM_FLAG_WAITERS; + if (oldcount & RWSEM_FLAG_HANDOFF) + adjustment -= RWSEM_FLAG_HANDOFF; + } else if (woken) { + /* + * When we've woken a reader, we no longer need to force + * writers to give up the lock and we can clear HANDOFF. + */ + if (oldcount & RWSEM_FLAG_HANDOFF) + adjustment -= RWSEM_FLAG_HANDOFF; } - /* - * When we've woken a reader, we no longer need to force writers - * to give up the lock and we can clear HANDOFF. 
- */ - if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF)) - adjustment -= RWSEM_FLAG_HANDOFF; - if (adjustment) atomic_long_add(adjustment, &sem->count); @@ -532,12 +563,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem, * race conditions between checking the rwsem wait list and setting the * sem->count accordingly. * - * If wstate is WRITER_HANDOFF, it will make sure that either the handoff - * bit is set or the lock is acquired with handoff bit cleared. + * Implies rwsem_del_waiter() on success. */ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, - enum writer_wait_state wstate) + struct rwsem_waiter *waiter) { + bool first = rwsem_first_waiter(sem) == waiter; long count, new; lockdep_assert_held(&sem->wait_lock); @@ -546,13 +577,19 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, do { bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF); - if (has_handoff && wstate == WRITER_NOT_FIRST) - return false; + if (has_handoff) { + if (!first) + return false; + + /* First waiter inherits a previously set handoff bit */ + waiter->handoff_set = true; + } new = count; if (count & RWSEM_LOCK_MASK) { - if (has_handoff || (wstate != WRITER_HANDOFF)) + if (has_handoff || (!rt_task(waiter->task) && + !time_after(jiffies, waiter->timeout))) return false; new |= RWSEM_FLAG_HANDOFF; @@ -569,9 +606,17 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, * We have either acquired the lock with handoff bit cleared or * set the handoff bit. */ - if (new & RWSEM_FLAG_HANDOFF) + if (new & RWSEM_FLAG_HANDOFF) { + waiter->handoff_set = true; + lockevent_inc(rwsem_wlock_handoff); return false; + } + /* + * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on + * success. + */ + list_del(&waiter->list); rwsem_set_owner(sem); return true; } @@ -956,7 +1001,7 @@ queue: } adjustment += RWSEM_FLAG_WAITERS; } - list_add_tail(&waiter.list, &sem->wait_list); + rwsem_add_waiter(sem, &waiter); /* we're now waiting on the lock, but no longer actively locking */ count = atomic_long_add_return(adjustment, &sem->count); @@ -1002,11 +1047,7 @@ queue: return sem; out_nolock: - list_del(&waiter.list); - if (list_empty(&sem->wait_list)) { - atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF, - &sem->count); - } + rwsem_del_waiter(sem, &waiter); raw_spin_unlock_irq(&sem->wait_lock); __set_current_state(TASK_RUNNING); lockevent_inc(rwsem_rlock_fail); @@ -1020,9 +1061,7 @@ static struct rw_semaphore * rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) { long count; - enum writer_wait_state wstate; struct rwsem_waiter waiter; - struct rw_semaphore *ret = sem; DEFINE_WAKE_Q(wake_q); /* do optimistic spinning and steal lock if possible */ @@ -1038,16 +1077,13 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) waiter.task = current; waiter.type = RWSEM_WAITING_FOR_WRITE; waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT; + waiter.handoff_set = false; raw_spin_lock_irq(&sem->wait_lock); - - /* account for this before adding a new element to the list */ - wstate = list_empty(&sem->wait_list) ? 
WRITER_FIRST : WRITER_NOT_FIRST; - - list_add_tail(&waiter.list, &sem->wait_list); + rwsem_add_waiter(sem, &waiter); /* we're now waiting on the lock */ - if (wstate == WRITER_NOT_FIRST) { + if (rwsem_first_waiter(sem) != &waiter) { count = atomic_long_read(&sem->count); /* @@ -1083,13 +1119,16 @@ wait: /* wait until we successfully acquire the lock */ set_current_state(state); for (;;) { - if (rwsem_try_write_lock(sem, wstate)) { + if (rwsem_try_write_lock(sem, &waiter)) { /* rwsem_try_write_lock() implies ACQUIRE on success */ break; } raw_spin_unlock_irq(&sem->wait_lock); + if (signal_pending_state(state, current)) + goto out_nolock; + /* * After setting the handoff bit and failing to acquire * the lock, attempt to spin on owner to accelerate lock @@ -1098,7 +1137,7 @@ wait: * In this case, we attempt to acquire the lock again * without sleeping. */ - if (wstate == WRITER_HANDOFF) { + if (waiter.handoff_set) { enum owner_state owner_state; preempt_disable(); @@ -1109,66 +1148,26 @@ wait: goto trylock_again; } - /* Block until there are no active lockers. */ - for (;;) { - if (signal_pending_state(state, current)) - goto out_nolock; - - schedule(); - lockevent_inc(rwsem_sleep_writer); - set_current_state(state); - /* - * If HANDOFF bit is set, unconditionally do - * a trylock. - */ - if (wstate == WRITER_HANDOFF) - break; - - if ((wstate == WRITER_NOT_FIRST) && - (rwsem_first_waiter(sem) == &waiter)) - wstate = WRITER_FIRST; - - count = atomic_long_read(&sem->count); - if (!(count & RWSEM_LOCK_MASK)) - break; - - /* - * The setting of the handoff bit is deferred - * until rwsem_try_write_lock() is called. - */ - if ((wstate == WRITER_FIRST) && (rt_task(current) || - time_after(jiffies, waiter.timeout))) { - wstate = WRITER_HANDOFF; - lockevent_inc(rwsem_wlock_handoff); - break; - } - } + schedule(); + lockevent_inc(rwsem_sleep_writer); + set_current_state(state); trylock_again: raw_spin_lock_irq(&sem->wait_lock); } __set_current_state(TASK_RUNNING); - list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); lockevent_inc(rwsem_wlock); - - return ret; + return sem; out_nolock: __set_current_state(TASK_RUNNING); raw_spin_lock_irq(&sem->wait_lock); - list_del(&waiter.list); - - if (unlikely(wstate == WRITER_HANDOFF)) - atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count); - - if (list_empty(&sem->wait_list)) - atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count); - else + rwsem_del_waiter(sem, &waiter); + if (!list_empty(&sem->wait_list)) rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); raw_spin_unlock_irq(&sem->wait_lock); wake_up_q(&wake_q); lockevent_inc(rwsem_wlock_fail); - return ERR_PTR(-EINTR); } @@ -1249,17 +1248,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); - /* - * Optimize for the case when the rwsem is not locked at all. 
- */ - tmp = RWSEM_UNLOCKED_VALUE; - do { + tmp = atomic_long_read(&sem->count); + while (!(tmp & RWSEM_READ_FAILED_MASK)) { if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, - tmp + RWSEM_READER_BIAS)) { + tmp + RWSEM_READER_BIAS)) { rwsem_set_reader_owned(sem); return 1; } - } while (!(tmp & RWSEM_READ_FAILED_MASK)); + } return 0; } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3c9b0fda64ac..77563109c0ea 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1918,7 +1918,7 @@ static void __init init_uclamp_rq(struct rq *rq) }; } - rq->uclamp_flags = 0; + rq->uclamp_flags = UCLAMP_FLAG_IDLE; } static void __init init_uclamp(void) @@ -6617,11 +6617,11 @@ static int __init setup_preempt_mode(char *str) int mode = sched_dynamic_mode(str); if (mode < 0) { pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); - return 1; + return 0; } sched_dynamic_update(mode); - return 0; + return 1; } __setup("preempt=", setup_preempt_mode); @@ -8619,9 +8619,6 @@ void __init init_idle(struct task_struct *idle, int cpu) idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY; kthread_set_per_cpu(idle, cpu); - scs_task_reset(idle); - kasan_unpoison_task_stack(idle); - #ifdef CONFIG_SMP /* * It's possible that init_idle() gets called multiple times on a task, @@ -8777,7 +8774,6 @@ void idle_task_exit(void) finish_arch_post_lock_switch(); } - scs_task_reset(current); /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ } diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 872e481d5098..9392aea1804e 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -615,7 +615,8 @@ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) .sum_exec_runtime = p->se.sum_exec_runtime, }; - task_cputime(p, &cputime.utime, &cputime.stime); + if (task_cputime(p, &cputime.utime, &cputime.stime)) + cputime.sum_exec_runtime = task_sched_runtime(p); cputime_adjust(&cputime, &p->prev_cputime, ut, st); } EXPORT_SYMBOL_GPL(task_cputime_adjusted); @@ -828,19 +829,21 @@ u64 task_gtime(struct task_struct *t) * add up the pending nohz execution time since the last * cputime snapshot. 
*/ -void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) +bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { struct vtime *vtime = &t->vtime; unsigned int seq; u64 delta; + int ret; if (!vtime_accounting_enabled()) { *utime = t->utime; *stime = t->stime; - return; + return false; } do { + ret = false; seq = read_seqcount_begin(&vtime->seqcount); *utime = t->utime; @@ -850,6 +853,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) if (vtime->state < VTIME_SYS) continue; + ret = true; delta = vtime_delta(vtime); /* @@ -861,6 +865,8 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) else *utime += vtime->utime + delta; } while (read_seqcount_retry(&vtime->seqcount, seq)); + + return ret; } static int vtime_state_fetch(struct vtime *vtime, int cpu) diff --git a/kernel/softirq.c b/kernel/softirq.c index 322b65d45676..41f470929e99 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -595,7 +595,8 @@ void irq_enter_rcu(void) { __irq_enter_raw(); - if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)) + if (tick_nohz_full_cpu(smp_processor_id()) || + (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))) tick_irq_enter(); account_hardirq_enter(current); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6bffe5af8cb1..17a283ce2b20 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1375,6 +1375,13 @@ static inline void tick_nohz_irq_enter(void) now = ktime_get(); if (ts->idle_active) tick_nohz_stop_idle(ts, now); + /* + * If all CPUs are idle. We may need to update a stale jiffies value. + * Note nohz_full is a special case: a timekeeper is guaranteed to stay + * alive but it might be busy looping with interrupts disabled in some + * rare case (typically stop machine). So we must make sure we have a + * last resort. 
+ */ if (ts->tick_stopped) tick_nohz_update_jiffies(now); } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 1cd647c010da..623dd0684429 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1402,9 +1402,6 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, void *, buf, u32, size, u64, flags) { -#ifndef CONFIG_X86 - return -ENOENT; -#else static const u32 br_entry_size = sizeof(struct perf_branch_entry); struct perf_branch_stack *br_stack = ctx->data->br_stack; u32 to_copy; @@ -1413,7 +1410,7 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, return -EINVAL; if (unlikely(!br_stack)) - return -EINVAL; + return -ENOENT; if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) return br_stack->nr * br_entry_size; @@ -1425,7 +1422,6 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, memcpy(buf, br_stack->entries, to_copy); return to_copy; -#endif } static const struct bpf_func_proto bpf_read_branch_records_proto = { diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6b60ab9475ed..38715aa6cfdf 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1366,14 +1366,26 @@ __event_trigger_test_discard(struct trace_event_file *file, if (eflags & EVENT_FILE_FL_TRIGGER_COND) *tt = event_triggers_call(file, buffer, entry, event); - if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || - (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && - !filter_match_preds(file->filter, entry))) { - __trace_event_discard_commit(buffer, event); - return true; - } + if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED | + EVENT_FILE_FL_FILTERED | + EVENT_FILE_FL_PID_FILTER)))) + return false; + + if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) + goto discard; + + if (file->flags & EVENT_FILE_FL_FILTERED && + !filter_match_preds(file->filter, entry)) + goto discard; + + if ((file->flags & EVENT_FILE_FL_PID_FILTER) && + trace_event_ignore_this_pid(file)) + goto discard; return false; + discard: + __trace_event_discard_commit(buffer, event); + return true; } /** diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 4021b9a79f93..92be9cb1d7d4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2678,12 +2678,24 @@ static struct trace_event_file * trace_create_new_event(struct trace_event_call *call, struct trace_array *tr) { + struct trace_pid_list *no_pid_list; + struct trace_pid_list *pid_list; struct trace_event_file *file; + unsigned int first; file = kmem_cache_alloc(file_cachep, GFP_TRACE); if (!file) return NULL; + pid_list = rcu_dereference_protected(tr->filtered_pids, + lockdep_is_held(&event_mutex)); + no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, + lockdep_is_held(&event_mutex)); + + if (!trace_pid_list_first(pid_list, &first) || + !trace_pid_list_first(no_pid_list, &first)) + file->flags |= EVENT_FILE_FL_PID_FILTER; + file->event_call = call; file->tr = tr; atomic_set(&file->sm_ref, 0); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 9555b8e1d1e3..319f9c8ca7e7 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -3757,7 +3757,7 @@ static int check_synth_field(struct synth_event *event, if (strcmp(field->type, hist_field->type) != 0) { if (field->size != hist_field->size || - field->is_signed != hist_field->is_signed) + (!field->is_string && field->is_signed != 
hist_field->is_signed)) return -EINVAL; } diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 39bb56d2dcbe..9628b5571846 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -15,6 +15,7 @@ #include <linux/jhash.h> #include <linux/slab.h> #include <linux/sort.h> +#include <linux/kmemleak.h> #include "tracing_map.h" #include "trace.h" @@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a) for (i = 0; i < a->n_pages; i++) { if (!a->pages[i]) break; + kmemleak_free(a->pages[i]); free_page((unsigned long)a->pages[i]); } @@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL); if (!a->pages[i]) goto free; + kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL); } out: return a; diff --git a/lib/Kconfig b/lib/Kconfig index 5e7165e6a346..655b0e43f260 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -680,6 +680,11 @@ config STACK_HASH_ORDER Select the hash size as a power of 2 for the stackdepot hash table. Choose a lower value to reduce the memory impact. +config REF_TRACKER + bool + depends on STACKTRACE_SUPPORT + select STACKDEPOT + config SBITMAP bool diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5c12bde10996..c77fe36bb3d8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -316,6 +316,7 @@ config DEBUG_INFO_BTF bool "Generate BTF typeinfo" depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST + depends on BPF_SYSCALL help Generate deduplicated BTF type information from DWARF debug info. Turning this on expects presence of pahole tool, which will convert @@ -598,6 +599,11 @@ config DEBUG_MISC Say Y here if you need to enable miscellaneous debug code that should be under a more specific debug option but isn't. +menu "Networking Debugging" + +source "net/Kconfig.debug" + +endmenu # "Networking Debugging" menu "Memory Debugging" @@ -2106,6 +2112,16 @@ config BACKTRACE_SELF_TEST Say N if you are unsure. +config TEST_REF_TRACKER + tristate "Self test for reference tracker" + depends on DEBUG_KERNEL && STACKTRACE_SUPPORT + select REF_TRACKER + help + This option provides a kernel module performing tests + using reference tracker infrastructure. + + Say N if you are unsure. + config RBTREE_TEST tristate "Red-Black tree test" depends on DEBUG_KERNEL diff --git a/lib/Makefile b/lib/Makefile index 364c23f15578..b213a7bbf3fd 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -101,7 +101,7 @@ obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o obj-$(CONFIG_TEST_HMM) += test_hmm.o obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o - +obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o # # CFLAGS for compiling floating point code inside the kernel. 
x86/Makefile turns # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS @@ -270,6 +270,8 @@ obj-$(CONFIG_STACKDEPOT) += stackdepot.o KASAN_SANITIZE_stackdepot.o := n KCOV_INSTRUMENT_stackdepot.o := n +obj-$(CONFIG_REF_TRACKER) += ref_tracker.o + libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ fdt_empty_tree.o fdt_addresses.o $(foreach file, $(libfdt_files), \ diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c new file mode 100644 index 000000000000..0ae2e66dcf0f --- /dev/null +++ b/lib/ref_tracker.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/export.h> +#include <linux/ref_tracker.h> +#include <linux/slab.h> +#include <linux/stacktrace.h> +#include <linux/stackdepot.h> + +#define REF_TRACKER_STACK_ENTRIES 16 + +struct ref_tracker { + struct list_head head; /* anchor into dir->list or dir->quarantine */ + bool dead; + depot_stack_handle_t alloc_stack_handle; + depot_stack_handle_t free_stack_handle; +}; + +void ref_tracker_dir_exit(struct ref_tracker_dir *dir) +{ + struct ref_tracker *tracker, *n; + unsigned long flags; + bool leak = false; + + spin_lock_irqsave(&dir->lock, flags); + list_for_each_entry_safe(tracker, n, &dir->quarantine, head) { + list_del(&tracker->head); + kfree(tracker); + dir->quarantine_avail++; + } + list_for_each_entry_safe(tracker, n, &dir->list, head) { + pr_err("leaked reference.\n"); + if (tracker->alloc_stack_handle) + stack_depot_print(tracker->alloc_stack_handle); + leak = true; + list_del(&tracker->head); + kfree(tracker); + } + spin_unlock_irqrestore(&dir->lock, flags); + WARN_ON_ONCE(leak); + WARN_ON_ONCE(refcount_read(&dir->untracked) != 1); +} +EXPORT_SYMBOL(ref_tracker_dir_exit); + +void ref_tracker_dir_print(struct ref_tracker_dir *dir, + unsigned int display_limit) +{ + struct ref_tracker *tracker; + unsigned long flags; + unsigned int i = 0; + + spin_lock_irqsave(&dir->lock, flags); + list_for_each_entry(tracker, &dir->list, head) { + if (i < display_limit) { + pr_err("leaked reference.\n"); + if (tracker->alloc_stack_handle) + stack_depot_print(tracker->alloc_stack_handle); + i++; + } else { + break; + } + } + spin_unlock_irqrestore(&dir->lock, flags); +} +EXPORT_SYMBOL(ref_tracker_dir_print); + +int ref_tracker_alloc(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp, + gfp_t gfp) +{ + unsigned long entries[REF_TRACKER_STACK_ENTRIES]; + struct ref_tracker *tracker; + unsigned int nr_entries; + unsigned long flags; + + *trackerp = tracker = kzalloc(sizeof(*tracker), gfp | __GFP_NOFAIL); + if (unlikely(!tracker)) { + pr_err_once("memory allocation failure, unreliable refcount tracker.\n"); + refcount_inc(&dir->untracked); + return -ENOMEM; + } + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + nr_entries = filter_irq_stacks(entries, nr_entries); + tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp); + + spin_lock_irqsave(&dir->lock, flags); + list_add(&tracker->head, &dir->list); + spin_unlock_irqrestore(&dir->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(ref_tracker_alloc); + +int ref_tracker_free(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp) +{ + unsigned long entries[REF_TRACKER_STACK_ENTRIES]; + struct ref_tracker *tracker = *trackerp; + depot_stack_handle_t stack_handle; + unsigned int nr_entries; + unsigned long flags; + + if (!tracker) { + refcount_dec(&dir->untracked); + return -EEXIST; + } + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + nr_entries = 
filter_irq_stacks(entries, nr_entries); + stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC); + + spin_lock_irqsave(&dir->lock, flags); + if (tracker->dead) { + pr_err("reference already released.\n"); + if (tracker->alloc_stack_handle) { + pr_err("allocated in:\n"); + stack_depot_print(tracker->alloc_stack_handle); + } + if (tracker->free_stack_handle) { + pr_err("freed in:\n"); + stack_depot_print(tracker->free_stack_handle); + } + spin_unlock_irqrestore(&dir->lock, flags); + WARN_ON_ONCE(1); + return -EINVAL; + } + tracker->dead = true; + + tracker->free_stack_handle = stack_handle; + + list_move_tail(&tracker->head, &dir->quarantine); + if (!dir->quarantine_avail) { + tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head); + list_del(&tracker->head); + } else { + dir->quarantine_avail--; + tracker = NULL; + } + spin_unlock_irqrestore(&dir->lock, flags); + + kfree(tracker); + return 0; +} +EXPORT_SYMBOL_GPL(ref_tracker_free); diff --git a/lib/siphash.c b/lib/siphash.c index a90112ee72a1..72b9068ab57b 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -49,6 +49,7 @@ SIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_unaligned); -#endif /** * siphash_1u64 - compute 64-bit siphash PRF value of a u64 @@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32); HSIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); -#endif /** * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 @@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32); HSIPROUND; \ return v1 ^ v3; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); @@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); -#endif /** * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 diff --git a/lib/test_bpf.c b/lib/test_bpf.c index adae39567264..0c5cb2d6436a 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -14683,7 +14683,7 @@ static struct 
tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, - .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, + .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, }, { "Tail call count preserved across function calls", @@ -14705,7 +14705,7 @@ static struct tail_call_test tail_call_tests[] = { }, .stack_depth = 8, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, - .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, + .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, }, { "Tail call error path, NULL target", diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c new file mode 100644 index 000000000000..19d7dec70cc6 --- /dev/null +++ b/lib/test_ref_tracker.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Reference tracker self test. + * + * Copyright (c) 2021 Eric Dumazet <edumazet@google.com> + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/ref_tracker.h> +#include <linux/slab.h> +#include <linux/timer.h> + +static struct ref_tracker_dir ref_dir; +static struct ref_tracker *tracker[20]; + +#define TRT_ALLOC(X) static noinline void \ + alloctest_ref_tracker_alloc##X(struct ref_tracker_dir *dir, \ + struct ref_tracker **trackerp) \ + { \ + ref_tracker_alloc(dir, trackerp, GFP_KERNEL); \ + } + +TRT_ALLOC(1) +TRT_ALLOC(2) +TRT_ALLOC(3) +TRT_ALLOC(4) +TRT_ALLOC(5) +TRT_ALLOC(6) +TRT_ALLOC(7) +TRT_ALLOC(8) +TRT_ALLOC(9) +TRT_ALLOC(10) +TRT_ALLOC(11) +TRT_ALLOC(12) +TRT_ALLOC(13) +TRT_ALLOC(14) +TRT_ALLOC(15) +TRT_ALLOC(16) +TRT_ALLOC(17) +TRT_ALLOC(18) +TRT_ALLOC(19) + +#undef TRT_ALLOC + +static noinline void +alloctest_ref_tracker_free(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp) +{ + ref_tracker_free(dir, trackerp); +} + + +static struct timer_list test_ref_tracker_timer; +static atomic_t test_ref_timer_done = ATOMIC_INIT(0); + +static void test_ref_tracker_timer_func(struct timer_list *t) +{ + ref_tracker_alloc(&ref_dir, &tracker[0], GFP_ATOMIC); + atomic_set(&test_ref_timer_done, 1); +} + +static int __init test_ref_tracker_init(void) +{ + int i; + + ref_tracker_dir_init(&ref_dir, 100); + + timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0); + mod_timer(&test_ref_tracker_timer, jiffies + 1); + + alloctest_ref_tracker_alloc1(&ref_dir, &tracker[1]); + alloctest_ref_tracker_alloc2(&ref_dir, &tracker[2]); + alloctest_ref_tracker_alloc3(&ref_dir, &tracker[3]); + alloctest_ref_tracker_alloc4(&ref_dir, &tracker[4]); + alloctest_ref_tracker_alloc5(&ref_dir, &tracker[5]); + alloctest_ref_tracker_alloc6(&ref_dir, &tracker[6]); + alloctest_ref_tracker_alloc7(&ref_dir, &tracker[7]); + alloctest_ref_tracker_alloc8(&ref_dir, &tracker[8]); + alloctest_ref_tracker_alloc9(&ref_dir, &tracker[9]); + alloctest_ref_tracker_alloc10(&ref_dir, &tracker[10]); + alloctest_ref_tracker_alloc11(&ref_dir, &tracker[11]); + alloctest_ref_tracker_alloc12(&ref_dir, &tracker[12]); + alloctest_ref_tracker_alloc13(&ref_dir, &tracker[13]); + alloctest_ref_tracker_alloc14(&ref_dir, &tracker[14]); + alloctest_ref_tracker_alloc15(&ref_dir, &tracker[15]); + alloctest_ref_tracker_alloc16(&ref_dir, &tracker[16]); + alloctest_ref_tracker_alloc17(&ref_dir, &tracker[17]); + alloctest_ref_tracker_alloc18(&ref_dir, &tracker[18]); + alloctest_ref_tracker_alloc19(&ref_dir, &tracker[19]); + + /* Free all trackers except 0 and 1. */ + for (i = 2; i < ARRAY_SIZE(tracker); i++) + alloctest_ref_tracker_free(&ref_dir, &tracker[i]); + + /* Attempt to free an already freed tracker.
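+ * ref_tracker_free() should detect this, print both stacks and return -EINVAL.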
*/ + alloctest_ref_tracker_free(&ref_dir, &tracker[2]); + + while (!atomic_read(&test_ref_timer_done)) + msleep(1); + + /* This should warn about tracker[0] & tracker[1] not being freed. */ + ref_tracker_dir_exit(&ref_dir); + + return 0; +} + +static void __exit test_ref_tracker_exit(void) +{ +} + +module_init(test_ref_tracker_init); +module_exit(test_ref_tracker_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 35fe49080ee9..47f47f60440e 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -13,6 +13,7 @@ #include <linux/mmu_notifier.h> #include <linux/page_idle.h> #include <linux/pagewalk.h> +#include <linux/sched/mm.h> #include "prmtv-common.h" diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 852041f6be41..2a9627dc784c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -35,6 +35,7 @@ #include <linux/memblock.h> #include <linux/compaction.h> #include <linux/rmap.h> +#include <linux/module.h> #include <asm/tlbflush.h> diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 16f706c55d92..2b5531840583 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -30,6 +30,7 @@ #include <linux/swap_slots.h> #include <linux/cpu.h> #include <linux/cpumask.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <linux/mm.h> diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 866556e041b7..26d031a43cc1 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -616,7 +616,7 @@ static int vlan_dev_init(struct net_device *dev) return -ENOMEM; /* Get vlan's reference to real_dev */ - dev_hold(real_dev); + dev_hold_track(real_dev, &vlan->dev_tracker, GFP_KERNEL); return 0; } @@ -848,7 +848,7 @@ static void vlan_dev_free(struct net_device *dev) vlan->vlan_pcpu_stats = NULL; /* Get rid of the vlan's reference to real_dev */ - dev_put(vlan->real_dev); + dev_put_track(vlan->real_dev, &vlan->dev_tracker); } void vlan_setup(struct net_device *dev) diff --git a/net/Kconfig.debug b/net/Kconfig.debug new file mode 100644 index 000000000000..2f50611df858 --- /dev/null +++ b/net/Kconfig.debug @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config NET_DEV_REFCNT_TRACKER + bool "Enable net device refcount tracking" + depends on DEBUG_KERNEL && STACKTRACE_SUPPORT + select REF_TRACKER + default n + help + Enable a debugging feature to track device references. + This adds memory and CPU costs. + +config NET_NS_REFCNT_TRACKER + bool "Enable networking namespace refcount tracking" + depends on DEBUG_KERNEL && STACKTRACE_SUPPORT + select REF_TRACKER + default n + help + Enable a debugging feature to track netns references. + This adds memory and CPU costs.
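For orientation, the net/ hunks that follow all apply one mechanical conversion. A minimal sketch of the pattern, assuming a hypothetical foo_priv holder (netdevice_tracker, dev_hold_track() and dev_put_track() are the helpers introduced by this series; everything named foo is made up):

struct foo_priv {
	struct net_device *dev;
	netdevice_tracker dev_tracker;	/* tracker cookie paired with dev */
};

static int foo_bind(struct foo_priv *p, struct net_device *dev)
{
	p->dev = dev;
	/* Was dev_hold(dev); additionally records an allocation stack. */
	dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
	return 0;
}

static void foo_unbind(struct foo_priv *p)
{
	/* Was dev_put(p->dev); additionally validates and frees the tracker. */
	dev_put_track(p->dev, &p->dev_tracker);
	p->dev = NULL;
}

With CONFIG_NET_DEV_REFCNT_TRACKER enabled, a leaked reference then surfaces through the ref_tracker_dir_print() call added to netdev_wait_allrefs() in net/core/dev.c below, instead of only as an opaque usage count message.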
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index d0a043a51848..256fadb94df3 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -58,7 +58,7 @@ void ax25_dev_device_up(struct net_device *dev) dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC); ax25_dev->forward = NULL; ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE; @@ -114,7 +114,7 @@ void ax25_dev_device_down(struct net_device *dev) ax25_dev_list = s->next; spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; - dev_put(dev); + dev_put_track(dev, &ax25_dev->dev_tracker); kfree(ax25_dev); return; } @@ -124,7 +124,7 @@ void ax25_dev_device_down(struct net_device *dev) s->next = ax25_dev->next; spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; - dev_put(dev); + dev_put_track(dev, &ax25_dev->dev_tracker); kfree(ax25_dev); return; } @@ -188,7 +188,7 @@ void __exit ax25_dev_free(void) ax25_dev = ax25_dev_list; while (ax25_dev != NULL) { s = ax25_dev; - dev_put(ax25_dev->dev); + dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker); ax25_dev = ax25_dev->next; kfree(s); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 3915832a03c2..a52ad81596b7 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -274,7 +274,7 @@ static void destroy_nbp(struct net_bridge_port *p) p->br = NULL; p->dev = NULL; - dev_put(dev); + dev_put_track(dev, &p->dev_tracker); kobject_put(&p->kobj); } @@ -423,7 +423,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, return ERR_PTR(-ENOMEM); p->br = br; - dev_hold(dev); + dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL); p->dev = dev; p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; @@ -434,7 +434,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, br_stp_port_timer_init(p); err = br_multicast_add_port(p); if (err) { - dev_put(dev); + dev_put_track(dev, &p->dev_tracker); kfree(p); p = ERR_PTR(err); } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index c0efd697865a..af2b3512d86c 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -344,6 +344,7 @@ struct net_bridge_mdb_entry { struct net_bridge_port { struct net_bridge *br; struct net_device *dev; + netdevice_tracker dev_tracker; struct list_head list; unsigned long flags; diff --git a/net/core/dev.c b/net/core/dev.c index d30adecc2bb2..a855e41bbe39 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3719,8 +3719,12 @@ no_lock_out: * separate lock before trying to get qdisc main lock. * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. + * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit + * and then other tasks will only enqueue packets. The packets will be + * sent after the qdisc owner is scheduled again. To prevent this + * scenario the task always serializes on the lock. */ - contended = qdisc_is_running(q); + contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT); if (unlikely(contended)) spin_lock(&q->busylock); @@ -4093,7 +4097,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ - if (txq->xmit_lock_owner != cpu) { + /* Other cpus might concurrently change txq->xmit_lock_owner + * to -1 or to their cpu id, but not to our id.
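+ * Reading our own id therefore proves recursion; any other value proves we do not hold the lock.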
+ */ + if (READ_ONCE(txq->xmit_lock_owner) != cpu) { if (dev_xmit_recursion()) goto recursion_alert; @@ -6534,6 +6541,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) struct netdev_adjacent { struct net_device *dev; + netdevice_tracker dev_tracker; /* upper master flag, there can only be one master device per list */ bool master; @@ -7298,7 +7306,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, adj->ref_nr = 1; adj->private = private; adj->ignore = false; - dev_hold(adj_dev); + dev_hold_track(adj_dev, &adj->dev_tracker, GFP_KERNEL); pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n", dev->name, adj_dev->name, adj->ref_nr, adj_dev->name); @@ -7327,8 +7335,8 @@ remove_symlinks: if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); free_adj: + dev_put_track(adj_dev, &adj->dev_tracker); kfree(adj); - dev_put(adj_dev); return ret; } @@ -7369,7 +7377,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, list_del_rcu(&adj->list); pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n", adj_dev->name, dev->name, adj_dev->name); - dev_put(adj_dev); + dev_put_track(adj_dev, &adj->dev_tracker); kfree_rcu(adj, rcu); } @@ -9861,6 +9869,7 @@ static void netdev_wait_allrefs(struct net_device *dev) netdev_unregister_timeout_secs * HZ)) { pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", dev->name, refcnt); + ref_tracker_dir_print(&dev->refcnt_tracker, 10); warning_time = jiffies; } } @@ -10151,6 +10160,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev = PTR_ALIGN(p, NETDEV_ALIGN); dev->padded = (char *)dev - (char *)p; + ref_tracker_dir_init(&dev->refcnt_tracker, 128); #ifdef CONFIG_PCPU_DEV_REFCNT dev->pcpu_refcnt = alloc_percpu(int); if (!dev->pcpu_refcnt) @@ -10267,6 +10277,7 @@ void free_netdev(struct net_device *dev) list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) netif_napi_del(p); + ref_tracker_dir_exit(&dev->refcnt_tracker); #ifdef CONFIG_PCPU_DEV_REFCNT free_percpu(dev->pcpu_refcnt); dev->pcpu_refcnt = NULL; diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index cbab5fec64b1..1b807d119da5 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -192,7 +192,7 @@ static int net_hwtstamp_validate(struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ + if (cfg.flags & ~HWTSTAMP_FLAG_MASK) return -EINVAL; tx_type = cfg.tx_type; @@ -313,6 +313,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, int err; struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); const struct net_device_ops *ops; + netdevice_tracker dev_tracker; if (!dev) return -ENODEV; @@ -381,10 +382,10 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, return -ENODEV; if (!netif_is_bridge_master(dev)) return -EOPNOTSUPP; - dev_hold(dev); + dev_hold_track(dev, &dev_tracker, GFP_KERNEL); rtnl_unlock(); err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL); - dev_put(dev); + dev_put_track(dev, &dev_tracker); rtnl_lock(); return err; diff --git a/net/core/devlink.c b/net/core/devlink.c index db3b52110cf2..0a9349a02cad 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4139,14 +4139,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return err; } - if (info->attrs[DEVLINK_ATTR_NETNS_PID] 
|| - info->attrs[DEVLINK_ATTR_NETNS_FD] || - info->attrs[DEVLINK_ATTR_NETNS_ID]) { - dest_net = devlink_netns_get(skb, info); - if (IS_ERR(dest_net)) - return PTR_ERR(dest_net); - } - if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION]) action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]); else @@ -4189,6 +4181,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } } + if (info->attrs[DEVLINK_ATTR_NETNS_PID] || + info->attrs[DEVLINK_ATTR_NETNS_FD] || + info->attrs[DEVLINK_ATTR_NETNS_ID]) { + dest_net = devlink_netns_get(skb, info); + if (IS_ERR(dest_net)) + return PTR_ERR(dest_net); + } + err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack); if (dest_net) diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 49442cae6f69..3d0ab2eec916 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -850,7 +850,7 @@ net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata) } hw_metadata->input_dev = metadata->input_dev; - dev_hold(hw_metadata->input_dev); + dev_hold_track(hw_metadata->input_dev, &hw_metadata->dev_tracker, GFP_ATOMIC); return hw_metadata; @@ -864,9 +864,9 @@ free_hw_metadata: } static void -net_dm_hw_metadata_free(const struct devlink_trap_metadata *hw_metadata) +net_dm_hw_metadata_free(struct devlink_trap_metadata *hw_metadata) { - dev_put(hw_metadata->input_dev); + dev_put_track(hw_metadata->input_dev, &hw_metadata->dev_tracker); kfree(hw_metadata->fa_cookie); kfree(hw_metadata->trap_name); kfree(hw_metadata->trap_group_name); diff --git a/net/core/dst.c b/net/core/dst.c index 497ef9b3fc6a..d16c2c9bfebd 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -49,7 +49,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops, unsigned short flags) { dst->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &dst->dev_tracker, GFP_ATOMIC); dst->ops = ops; dst_init_metrics(dst, dst_default_metrics.metrics, true); dst->expires = 0UL; @@ -117,7 +117,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst) if (dst->ops->destroy) dst->ops->destroy(dst); - dev_put(dst->dev); + dev_put_track(dst->dev, &dst->dev_tracker); lwtstate_put(dst->lwtstate); @@ -159,8 +159,8 @@ void dst_dev_put(struct dst_entry *dst) dst->input = dst_discard; dst->output = dst_discard_out; dst->dev = blackhole_netdev; - dev_hold(dst->dev); - dev_put(dev); + dev_replace_track(dev, blackhole_netdev, &dst->dev_tracker, + GFP_ATOMIC); } EXPORT_SYMBOL(dst_dev_put); diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c index be74ab4551c2..0ccfd5fa5cb9 100644 --- a/net/core/dst_cache.c +++ b/net/core/dst_cache.c @@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache) free_percpu(dst_cache->cache); } EXPORT_SYMBOL_GPL(dst_cache_destroy); + +void dst_cache_reset_now(struct dst_cache *dst_cache) +{ + int i; + + if (!dst_cache->cache) + return; + + dst_cache->reset_ts = jiffies; + for_each_possible_cpu(i) { + struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i); + struct dst_entry *dst = idst->dst; + + idst->cookie = 0; + idst->dst = NULL; + dst_release(dst); + } +} +EXPORT_SYMBOL_GPL(dst_cache_reset_now); diff --git a/net/core/failover.c b/net/core/failover.c index b5cd3c727285..dcaa92a85ea2 100644 --- a/net/core/failover.c +++ b/net/core/failover.c @@ -252,7 +252,7 @@ struct failover *failover_register(struct net_device *dev, return ERR_PTR(-ENOMEM); rcu_assign_pointer(failover->ops, ops); - dev_hold(dev); + dev_hold_track(dev, &failover->dev_tracker, GFP_KERNEL); 
dev->priv_flags |= IFF_FAILOVER; rcu_assign_pointer(failover->failover_dev, dev); @@ -285,7 +285,7 @@ void failover_unregister(struct failover *failover) failover_dev->name); failover_dev->priv_flags &= ~IFF_FAILOVER; - dev_put(failover_dev); + dev_put_track(failover_dev, &failover->dev_tracker); spin_lock(&failover_lock); list_del(&failover->list); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 79df7cd9dbc1..1bb567a3b329 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -323,7 +323,7 @@ jumped: if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress, fib6_rule_suppress, fib4_rule_suppress, - rule, arg)) + rule, flags, arg)) continue; if (err != -EAGAIN) { diff --git a/net/core/filter.c b/net/core/filter.c index 8271624a19aa..e4cc3aff5bf7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1242,10 +1242,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) int err, new_len, old_len = fp->len; bool seen_ld_abs = false; - /* We are free to overwrite insns et al right here as it - * won't be used at this point in time anymore internally - * after the migration to the internal BPF instruction - * representation. + /* We are free to overwrite insns et al right here as it won't be used at + * this point in time anymore internally after the migration to the eBPF + * instruction representation. */ BUILD_BUG_ON(sizeof(struct sock_filter) != sizeof(struct bpf_insn)); @@ -1336,8 +1335,8 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, */ bpf_jit_compile(fp); - /* JIT compiler couldn't process this filter, so do the - * internal BPF translation for the optimized interpreter. + /* JIT compiler couldn't process this filter, so do the eBPF translation + * for the optimized interpreter. */ if (!fp->jited) fp = bpf_migrate_filter(fp); diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 9599afd0862d..b0f5344d1185 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -109,7 +109,7 @@ static void linkwatch_add_event(struct net_device *dev) spin_lock_irqsave(&lweventlist_lock, flags); if (list_empty(&dev->link_watch_list)) { list_add_tail(&dev->link_watch_list, &lweventlist); - dev_hold(dev); + dev_hold_track(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC); } spin_unlock_irqrestore(&lweventlist_lock, flags); } @@ -166,6 +166,9 @@ static void linkwatch_do_dev(struct net_device *dev) netdev_state_change(dev); } + /* Note: our callers are responsible for + * calling netdev_tracker_free(). + */ dev_put(dev); } @@ -209,6 +212,10 @@ static void __linkwatch_run_queue(int urgent_only) list_add_tail(&dev->link_watch_list, &lweventlist); continue; } + /* We must free netdev tracker under + * the spinlock protection. + */ + netdev_tracker_free(dev, &dev->linkwatch_dev_tracker); spin_unlock_irq(&lweventlist_lock); linkwatch_do_dev(dev); do_dev--; @@ -232,6 +239,10 @@ void linkwatch_forget_dev(struct net_device *dev) if (!list_empty(&dev->link_watch_list)) { list_del_init(&dev->link_watch_list); clean = 1; + /* We must release netdev tracker under + * the spinlock protection. 
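+ * (linkwatch_do_dev() runs after the lock is dropped and ends in a plain dev_put().)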
+ */ + netdev_tracker_free(dev, &dev->linkwatch_dev_tracker); } spin_unlock_irqrestore(&lweventlist_lock, flags); if (clean) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 72ba027c34cf..213cb7b26b7a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -624,7 +624,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey, memcpy(n->primary_key, pkey, key_len); n->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC); /* Protocol specific setup. */ if (tbl->constructor && (error = tbl->constructor(n)) < 0) { @@ -763,18 +763,17 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, ASSERT_RTNL(); - n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); + n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL); if (!n) goto out; - n->protocol = 0; write_pnet(&n->net, net); memcpy(n->key, pkey, key_len); n->dev = dev; - dev_hold(dev); + dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL); if (tbl->pconstructor && tbl->pconstructor(n)) { - dev_put(dev); + dev_put_track(dev, &n->dev_tracker); kfree(n); n = NULL; goto out; @@ -806,7 +805,7 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, write_unlock_bh(&tbl->lock); if (tbl->pdestructor) tbl->pdestructor(n); - dev_put(n->dev); + dev_put_track(n->dev, &n->dev_tracker); kfree(n); return 0; } @@ -839,7 +838,7 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, n->next = NULL; if (tbl->pdestructor) tbl->pdestructor(n); - dev_put(n->dev); + dev_put_track(n->dev, &n->dev_tracker); kfree(n); } return -ENOENT; @@ -880,7 +879,7 @@ void neigh_destroy(struct neighbour *neigh) if (dev->netdev_ops->ndo_neigh_destroy) dev->netdev_ops->ndo_neigh_destroy(dev, neigh); - dev_put(dev); + dev_put_track(dev, &neigh->dev_tracker); neigh_parms_put(neigh->parms); neigh_dbg(2, "neigh %p is destroyed\n", neigh); @@ -1666,13 +1665,13 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, refcount_set(&p->refcnt, 1); p->reachable_time = neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); - dev_hold(dev); + dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL); p->dev = dev; write_pnet(&p->net, net); p->sysctl_table = NULL; if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { - dev_put(dev); + dev_put_track(dev, &p->dev_tracker); kfree(p); return NULL; } @@ -1703,7 +1702,7 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) list_del(&parms->list); parms->dead = 1; write_unlock_bh(&tbl->lock); - dev_put(parms->dev); + dev_put_track(parms->dev, &parms->dev_tracker); call_rcu(&parms->rcu_head, neigh_rcu_free_parms); } EXPORT_SYMBOL(neigh_parms_release); @@ -3771,10 +3770,6 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, neigh_proc_base_reachable_time; } - /* Don't export sysctls to unprivileged users */ - if (neigh_parms_net(p)->user_ns != &init_user_ns) - t->neigh_vars[0].procname = NULL; - switch (neigh_parms_family(p)) { case AF_INET: p_name = "ipv4"; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index affe34d71d31..53ea262ecafd 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1004,7 +1004,7 @@ static void rx_queue_release(struct kobject *kobj) #endif memset(kobj, 0, sizeof(*kobj)); - dev_put(queue->dev); + dev_put_track(queue->dev, &queue->dev_tracker); } static const void *rx_queue_namespace(struct kobject *kobj) @@ -1044,7 +1044,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) /* Kobject_put later will trigger rx_queue_release call which * decreases dev refcount: 
Take that reference here */ - dev_hold(queue->dev); + dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL); kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, @@ -1607,7 +1607,7 @@ static void netdev_queue_release(struct kobject *kobj) struct netdev_queue *queue = to_netdev_queue(kobj); memset(kobj, 0, sizeof(*kobj)); - dev_put(queue->dev); + dev_put_track(queue->dev, &queue->dev_tracker); } static const void *netdev_queue_namespace(struct kobject *kobj) @@ -1647,7 +1647,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) /* Kobject_put later will trigger netdev_queue_release call * which decreases dev refcount: Take that reference here */ - dev_hold(queue->dev); + dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL); kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, @@ -1694,6 +1694,13 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) int i; int error = 0; + /* Tx queue kobjects are allowed to be updated when a device is being + * unregistered, but solely to remove queues from qdiscs. Any path + * adding queues should be fixed. + */ + WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num, + "New queues can't be registered after device unregistration."); + for (i = old_num; i < new_num; i++) { error = netdev_queue_add_kobject(dev, i); if (error) { @@ -1808,6 +1815,9 @@ static void remove_queue_kobjects(struct net_device *dev) net_rx_queue_update_kobjects(dev, real_rx, 0); netdev_queue_update_kobjects(dev, real_tx, 0); + + dev->real_num_rx_queues = 0; + dev->real_num_tx_queues = 0; #ifdef CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 202fa5eacd0f..9b7171c40434 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -311,6 +311,8 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) LIST_HEAD(net_exit_list); refcount_set(&net->ns.count, 1); + ref_tracker_dir_init(&net->refcnt_tracker, 128); + refcount_set(&net->passive, 1); get_random_bytes(&net->hash_mix, sizeof(u32)); preempt_disable(); @@ -635,6 +637,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net); void __put_net(struct net *net) { + ref_tracker_dir_exit(&net->refcnt_tracker); /* Cleanup the network namespace in process context */ if (llist_add(&net->cleanup_list, &cleanup_list)) queue_work(netns_wq, &net_cleanup_work); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index edfc0f8011f8..db724463e7cd 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -776,7 +776,7 @@ put_noaddr: err = __netpoll_setup(np, ndev); if (err) goto put; - + netdev_tracker_alloc(ndev, &np->dev_tracker, GFP_KERNEL); rtnl_unlock(); return 0; @@ -853,7 +853,7 @@ void netpoll_cleanup(struct netpoll *np) if (!np->dev) goto out; __netpoll_cleanup(np); - dev_put(np->dev); + dev_put_track(np->dev, &np->dev_tracker); np->dev = NULL; out: rtnl_unlock(); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index a3d74e2704c4..560a5e712dc3 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -410,6 +410,7 @@ struct pktgen_dev { * device name (not when the inject is * started as it used to do.) 
*/ + netdevice_tracker dev_tracker; char odevname[32]; struct flow_state *flows; unsigned int cflows; /* Concurrent flows (config) */ @@ -2099,7 +2100,7 @@ static int pktgen_setup_dev(const struct pktgen_net *pn, /* Clean old setups */ if (pkt_dev->odev) { - dev_put(pkt_dev->odev); + dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker); pkt_dev->odev = NULL; } @@ -2117,6 +2118,7 @@ static int pktgen_setup_dev(const struct pktgen_net *pn, err = -ENETDOWN; } else { pkt_dev->odev = odev; + netdev_tracker_alloc(odev, &pkt_dev->dev_tracker, GFP_KERNEL); return 0; } @@ -3805,7 +3807,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) return add_dev_to_thread(t, pkt_dev); out2: - dev_put(pkt_dev->odev); + dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker); out1: #ifdef CONFIG_XFRM free_SAs(pkt_dev); @@ -3899,7 +3901,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, /* Dis-associate from the interface */ if (pkt_dev->odev) { - dev_put(pkt_dev->odev); + dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker); pkt_dev->odev = NULL; } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 1ae52ac943f6..8eb671c827f9 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) { + psock_set_prog(&psock->progs.stream_parser, NULL); + if (!psock->saved_data_ready) return; @@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock) void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock) { + psock_set_prog(&psock->progs.stream_verdict, NULL); + psock_set_prog(&psock->progs.skb_verdict, NULL); + if (!psock->saved_data_ready) return; diff --git a/net/core/sock.c b/net/core/sock.c index 4a499d255f40..e21485ab285d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1983,7 +1983,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, sock_lock_init(sk); sk->sk_net_refcnt = kern ? 0 : 1; if (likely(sk->sk_net_refcnt)) { - get_net(net); + get_net_track(net, &sk->ns_tracker, priority); sock_inuse_add(net, 1); } @@ -2039,7 +2039,7 @@ static void __sk_destruct(struct rcu_head *head) put_pid(sk->sk_peer_pid); if (likely(sk->sk_net_refcnt)) - put_net(sock_net(sk)); + put_net_track(sock_net(sk), &sk->ns_tracker); sk_prot_free(sk->sk_prot_creator, sk); } @@ -2126,7 +2126,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* SANITY */ if (likely(newsk->sk_net_refcnt)) { - get_net(sock_net(newsk)); + get_net_track(sock_net(newsk), &newsk->ns_tracker, priority); sock_inuse_add(sock_net(newsk), 1); } sk_node_init(&newsk->sk_node); @@ -3292,7 +3292,7 @@ void lock_sock_nested(struct sock *sk, int subclass) might_sleep(); spin_lock_bh(&sk->sk_lock.slock); - if (sk->sk_lock.owned) + if (sock_owned_by_user_nocheck(sk)) __lock_sock(sk); sk->sk_lock.owned = 1; spin_unlock_bh(&sk->sk_lock.slock); @@ -3323,7 +3323,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) might_sleep(); spin_lock_bh(&sk->sk_lock.slock); - if (!sk->sk_lock.owned) { + if (!sock_owned_by_user_nocheck(sk)) { /* * Fast path return with bottom halves disabled and * sock::sk_lock.slock held. 
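For reference, a sketch of the helper the two sock.c hunks above switch to, under the assumption that it is a thin wrapper reading the owned flag without the lockdep assertion performed by sock_owned_by_user():

static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
{
	return sk->sk_lock.owned;
}

Behavior is unchanged; the helper merely replaces the open-coded sk->sk_lock.owned tests.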
diff --git a/net/core/sock_map.c b/net/core/sock_map.c index f39ef79ced67..4ca4b11f4e5f 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk, write_lock_bh(&sk->sk_callback_lock); if (strp_stop) sk_psock_stop_strp(sk, psock); - else + if (verdict_stop) sk_psock_stop_verdict(sk, psock); + + if (psock->psock_update_sk_prot) + psock->psock_update_sk_prot(sk, psock, false); write_unlock_bh(&sk->sk_callback_lock); } } @@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk) if (msg_parser) psock_set_prog(&psock->progs.msg_parser, msg_parser); + if (stream_parser) + psock_set_prog(&psock->progs.stream_parser, stream_parser); + if (stream_verdict) + psock_set_prog(&psock->progs.stream_verdict, stream_verdict); + if (skb_verdict) + psock_set_prog(&psock->progs.skb_verdict, skb_verdict); ret = sock_map_init_proto(sk, psock); if (ret < 0) @@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk) ret = sk_psock_init_strp(sk, psock); if (ret) goto out_unlock_drop; - psock_set_prog(&psock->progs.stream_verdict, stream_verdict); - psock_set_prog(&psock->progs.stream_parser, stream_parser); sk_psock_start_strp(sk, psock); } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) { - psock_set_prog(&psock->progs.stream_verdict, stream_verdict); sk_psock_start_verdict(sk,psock); } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) { - psock_set_prog(&psock->progs.skb_verdict, skb_verdict); sk_psock_start_verdict(sk, psock); } write_unlock_bh(&sk->sk_callback_lock); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 826957b6442b..c18b22c0bf55 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -129,35 +129,52 @@ void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag) } } +struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst, + const struct net_device *br) +{ + struct dsa_port *dp; + + list_for_each_entry(dp, &dst->ports, list) + if (dsa_port_bridge_dev_get(dp) == br) + return dp->bridge; + + return NULL; +} + static int dsa_bridge_num_find(const struct net_device *bridge_dev) { struct dsa_switch_tree *dst; - struct dsa_port *dp; - /* When preparing the offload for a port, it will have a valid - * dp->bridge_dev pointer but a not yet valid dp->bridge_num. - * However there might be other ports having the same dp->bridge_dev - * and a valid dp->bridge_num, so just ignore this port. 
- */ - list_for_each_entry(dst, &dsa_tree_list, list) - list_for_each_entry(dp, &dst->ports, list) - if (dp->bridge_dev == bridge_dev && - dp->bridge_num != -1) - return dp->bridge_num; + list_for_each_entry(dst, &dsa_tree_list, list) { + struct dsa_bridge *bridge; + + bridge = dsa_tree_bridge_find(dst, bridge_dev); + if (bridge) + return bridge->num; + } - return -1; + return 0; } -int dsa_bridge_num_get(const struct net_device *bridge_dev, int max) +unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max) { - int bridge_num = dsa_bridge_num_find(bridge_dev); + unsigned int bridge_num = dsa_bridge_num_find(bridge_dev); + + /* Switches without FDB isolation support don't get unique + * bridge numbering + */ + if (!max) + return 0; - if (bridge_num < 0) { - /* First port that offloads TX forwarding for this bridge */ - bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges, - DSA_MAX_NUM_OFFLOADING_BRIDGES); + if (!bridge_num) { + /* First port that requests FDB isolation or TX forwarding + * offload for this bridge + */ + bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges, + DSA_MAX_NUM_OFFLOADING_BRIDGES, + 1); if (bridge_num >= max) - return -1; + return 0; set_bit(bridge_num, &dsa_fwd_offloading_bridges); } @@ -165,13 +182,14 @@ int dsa_bridge_num_get(const struct net_device *bridge_dev, int max) return bridge_num; } -void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num) +void dsa_bridge_num_put(const struct net_device *bridge_dev, + unsigned int bridge_num) { - /* Check if the bridge is still in use, otherwise it is time - * to clean it up so we can reuse this bridge_num later. + /* Since we refcount bridges, we know that when we call this function + * it is no longer in use, so we can just go ahead and remove it from + * the bit mask. 
*/ - if (dsa_bridge_num_find(bridge_dev) < 0) - clear_bit(bridge_num, &dsa_fwd_offloading_bridges); + clear_bit(bridge_num, &dsa_fwd_offloading_bridges); } struct dsa_switch *dsa_switch_find(int tree_index, int sw_index) @@ -804,7 +822,7 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) int err; if (tag_ops->proto == dst->default_proto) - return 0; + goto connect; dsa_switch_for_each_cpu_port(cpu_dp, ds) { rtnl_lock(); @@ -818,7 +836,30 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) } } +connect: + if (tag_ops->connect) { + err = tag_ops->connect(ds); + if (err) + return err; + } + + if (ds->ops->connect_tag_protocol) { + err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); + if (err) { + dev_err(ds->dev, + "Unable to connect to tag protocol \"%s\": %pe\n", + tag_ops->name, ERR_PTR(err)); + goto disconnect; + } + } + return 0; + +disconnect: + if (tag_ops->disconnect) + tag_ops->disconnect(ds); + + return err; } static int dsa_switch_setup(struct dsa_switch *ds) @@ -1118,6 +1159,37 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst) dst->setup = false; } +static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst, + const struct dsa_device_ops *tag_ops) +{ + const struct dsa_device_ops *old_tag_ops = dst->tag_ops; + struct dsa_notifier_tag_proto_info info; + int err; + + dst->tag_ops = tag_ops; + + /* Notify the switches from this tree about the connection + * to the new tagger + */ + info.tag_ops = tag_ops; + err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info); + if (err && err != -EOPNOTSUPP) + goto out_disconnect; + + /* Notify the old tagger about the disconnection from this tree */ + info.tag_ops = old_tag_ops; + dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + + return 0; + +out_disconnect: + info.tag_ops = tag_ops; + dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + dst->tag_ops = old_tag_ops; + + return err; +} + /* Since the dsa/tagging sysfs device attribute is per master, the assumption * is that all DSA switches within a tree share the same tagger, otherwise * they would have formed disjoint trees (different "dsa,member" values). 
@@ -1150,12 +1222,15 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, goto out_unlock; } + /* Notify the tag protocol change */ info.tag_ops = tag_ops; err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); if (err) - goto out_unwind_tagger; + return err; - dst->tag_ops = tag_ops; + err = dsa_tree_bind_tag_proto(dst, tag_ops); + if (err) + goto out_unwind_tagger; rtnl_unlock(); @@ -1184,7 +1259,6 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) dp->ds = ds; dp->index = index; - dp->bridge_num = -1; INIT_LIST_HEAD(&dp->list); list_add_tail(&dp->list, &dst->ports); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index a5c9bc7b66c6..edfaae7b5967 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -37,6 +37,8 @@ enum { DSA_NOTIFIER_VLAN_DEL, DSA_NOTIFIER_MTU, DSA_NOTIFIER_TAG_PROTO, + DSA_NOTIFIER_TAG_PROTO_CONNECT, + DSA_NOTIFIER_TAG_PROTO_DISCONNECT, DSA_NOTIFIER_MRP_ADD, DSA_NOTIFIER_MRP_DEL, DSA_NOTIFIER_MRP_ADD_RING_ROLE, @@ -52,10 +54,11 @@ struct dsa_notifier_ageing_time_info { /* DSA_NOTIFIER_BRIDGE_* */ struct dsa_notifier_bridge_info { - struct net_device *br; + struct dsa_bridge bridge; int tree_index; int sw_index; int port; + bool tx_fwd_offload; }; /* DSA_NOTIFIER_FDB_* */ @@ -258,54 +261,13 @@ int dsa_port_mrp_add_ring_role(const struct dsa_port *dp, const struct switchdev_obj_ring_role_mrp *mrp); int dsa_port_mrp_del_ring_role(const struct dsa_port *dp, const struct switchdev_obj_ring_role_mrp *mrp); +int dsa_port_phylink_create(struct dsa_port *dp); int dsa_port_link_register_of(struct dsa_port *dp); void dsa_port_link_unregister_of(struct dsa_port *dp); int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr); void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast); -extern const struct phylink_mac_ops dsa_port_phylink_mac_ops; - -static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp, - const struct net_device *dev) -{ - return dsa_port_to_bridge_port(dp) == dev; -} - -static inline bool dsa_port_offloads_bridge(struct dsa_port *dp, - const struct net_device *bridge_dev) -{ - /* DSA ports connected to a bridge, and event was emitted - * for the bridge. 
- */ - return dp->bridge_dev == bridge_dev; -} - -/* Returns true if any port of this tree offloads the given net_device */ -static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst, - const struct net_device *dev) -{ - struct dsa_port *dp; - - list_for_each_entry(dp, &dst->ports, list) - if (dsa_port_offloads_bridge_port(dp, dev)) - return true; - - return false; -} - -/* Returns true if any port of this tree offloads the given bridge */ -static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst, - const struct net_device *bridge_dev) -{ - struct dsa_port *dp; - - list_for_each_entry(dp, &dst->ports, list) - if (dsa_port_offloads_bridge(dp, bridge_dev)) - return true; - - return false; -} /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; @@ -345,7 +307,7 @@ dsa_slave_to_master(const struct net_device *dev) static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); struct net_device *dev = skb->dev; struct net_device *upper_dev; u16 vid, pvid, proto; @@ -415,7 +377,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid) if (dp->type != DSA_PORT_TYPE_USER) continue; - if (!dp->bridge_dev) + if (!dp->bridge) continue; if (dp->stp_state != BR_STATE_LEARNING && @@ -444,7 +406,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid) /* If the ingress port offloads the bridge, we mark the frame as autonomously * forwarded by hardware, so the software bridge doesn't forward it twice, back * to us, because we already did. However, if we're in fallback mode and we do * software bridging, we are not offloading it, therefore the dp->bridge * pointer is not populated, and flooding needs to be done by software (we are * effectively operating in standalone ports mode). */ @@ -452,7 +414,7 @@ static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - skb->offload_fwd_mark = !!(dp->bridge_dev); + skb->offload_fwd_mark = !!(dp->bridge); } /* Helper for removing DSA header tags from packets in the RX path.
@@ -546,8 +508,11 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, struct net_device *master, const struct dsa_device_ops *tag_ops, const struct dsa_device_ops *old_tag_ops); -int dsa_bridge_num_get(const struct net_device *bridge_dev, int max); -void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num); +unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max); +void dsa_bridge_num_put(const struct net_device *bridge_dev, + unsigned int bridge_num); +struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst, + const struct net_device *br); /* tag_8021q.c */ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, diff --git a/net/dsa/port.c b/net/dsa/port.c index f6f12ad2b525..05677e016982 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -130,7 +130,7 @@ int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy) return err; } - if (!dp->bridge_dev) + if (!dp->bridge) dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false); if (dp->pl) @@ -158,7 +158,7 @@ void dsa_port_disable_rt(struct dsa_port *dp) if (dp->pl) phylink_stop(dp->pl); - if (!dp->bridge_dev) + if (!dp->bridge) dsa_port_set_state_now(dp, BR_STATE_DISABLED, false); if (ds->ops->port_disable) @@ -221,7 +221,7 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp, struct netlink_ext_ack *extack) { struct net_device *brport_dev = dsa_port_to_bridge_port(dp); - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); int err; err = dsa_port_inherit_brport_flags(dp, extack); @@ -270,52 +270,55 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp) */ } -static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp, - struct net_device *bridge_dev) +static int dsa_port_bridge_create(struct dsa_port *dp, + struct net_device *br, + struct netlink_ext_ack *extack) { - int bridge_num = dp->bridge_num; struct dsa_switch *ds = dp->ds; + struct dsa_bridge *bridge; - /* No bridge TX forwarding offload => do nothing */ - if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1) - return; + bridge = dsa_tree_bridge_find(ds->dst, br); + if (bridge) { + refcount_inc(&bridge->refcount); + dp->bridge = bridge; + return 0; + } - dp->bridge_num = -1; + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return -ENOMEM; - dsa_bridge_num_put(bridge_dev, bridge_num); + refcount_set(&bridge->refcount, 1); - /* Notify the chips only once the offload has been deactivated, so - * that they can update their configuration accordingly. 
- */ - ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev, - bridge_num); + bridge->dev = br; + + bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges); + if (ds->max_num_bridges && !bridge->num) { + NL_SET_ERR_MSG_MOD(extack, + "Range of offloadable bridges exceeded"); + kfree(bridge); + return -EOPNOTSUPP; + } + + dp->bridge = bridge; + + return 0; } -static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp, - struct net_device *bridge_dev) +static void dsa_port_bridge_destroy(struct dsa_port *dp, + const struct net_device *br) { - struct dsa_switch *ds = dp->ds; - int bridge_num, err; + struct dsa_bridge *bridge = dp->bridge; - if (!ds->ops->port_bridge_tx_fwd_offload) - return false; + dp->bridge = NULL; - bridge_num = dsa_bridge_num_get(bridge_dev, - ds->num_fwd_offloading_bridges); - if (bridge_num < 0) - return false; + if (!refcount_dec_and_test(&bridge->refcount)) + return; - dp->bridge_num = bridge_num; + if (bridge->num) + dsa_bridge_num_put(br, bridge->num); - /* Notify the driver */ - err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev, - bridge_num); - if (err) { - dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev); - return false; - } - - return true; + kfree(bridge); } int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, @@ -325,30 +328,32 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, .tree_index = dp->ds->dst->index, .sw_index = dp->ds->index, .port = dp->index, - .br = br, }; struct net_device *dev = dp->slave; struct net_device *brport_dev; - bool tx_fwd_offload; int err; /* Here the interface is already bridged. Reflect the current * configuration so that drivers can program their chips accordingly. */ - dp->bridge_dev = br; + err = dsa_port_bridge_create(dp, br, extack); + if (err) + return err; brport_dev = dsa_port_to_bridge_port(dp); + info.bridge = *dp->bridge; err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info); if (err) goto out_rollback; - tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br); + /* Drivers which support bridge TX forwarding should set this */ + dp->bridge->tx_fwd_offload = info.tx_fwd_offload; err = switchdev_bridge_port_offload(brport_dev, dev, dp, &dsa_slave_switchdev_notifier, &dsa_slave_switchdev_blocking_notifier, - tx_fwd_offload, extack); + dp->bridge->tx_fwd_offload, extack); if (err) goto out_rollback_unbridge; @@ -365,7 +370,7 @@ out_rollback_unoffload: out_rollback_unbridge: dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); out_rollback: - dp->bridge_dev = NULL; + dsa_port_bridge_destroy(dp, br); return err; } @@ -390,16 +395,14 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) .tree_index = dp->ds->dst->index, .sw_index = dp->ds->index, .port = dp->index, - .br = br, + .bridge = *dp->bridge, }; int err; /* Here the port is already unbridged. Reflect the current configuration * so that drivers can program their chips accordingly. 
*/ - dp->bridge_dev = NULL; - - dsa_port_bridge_tx_fwd_unoffload(dp, br); + dsa_port_bridge_destroy(dp, br); err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); if (err) @@ -477,12 +480,15 @@ err_lag_join: void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag) { - if (dp->bridge_dev) - dsa_port_pre_bridge_leave(dp, dp->bridge_dev); + struct net_device *br = dsa_port_bridge_dev_get(dp); + + if (br) + dsa_port_pre_bridge_leave(dp, br); } void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag) { + struct net_device *br = dsa_port_bridge_dev_get(dp); struct dsa_notifier_lag_info info = { .sw_index = dp->ds->index, .port = dp->index, @@ -496,8 +502,8 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag) /* Port might have been part of a LAG that in turn was * attached to a bridge. */ - if (dp->bridge_dev) - dsa_port_bridge_leave(dp, dp->bridge_dev); + if (br) + dsa_port_bridge_leave(dp, br); dp->lag_tx_enabled = false; dp->lag_dev = NULL; @@ -526,8 +532,8 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, * as long as we have 8021q uppers. */ if (vlan_filtering && dsa_port_is_user(dp)) { + struct net_device *br = dsa_port_bridge_dev_get(dp); struct net_device *upper_dev, *slave = dp->slave; - struct net_device *br = dp->bridge_dev; struct list_head *iter; netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) { @@ -561,17 +567,15 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, * different setting than what is being requested. */ dsa_switch_for_each_port(other_dp, ds) { - struct net_device *other_bridge; + struct net_device *other_br = dsa_port_bridge_dev_get(other_dp); - other_bridge = other_dp->bridge_dev; - if (!other_bridge) - continue; /* If it's the same bridge, it also has same * vlan_filtering setting => no need to check */ - if (other_bridge == dp->bridge_dev) + if (!other_br || other_br == dsa_port_bridge_dev_get(dp)) continue; - if (br_vlan_enabled(other_bridge) != vlan_filtering) { + + if (br_vlan_enabled(other_br) != vlan_filtering) { NL_SET_ERR_MSG_MOD(extack, "VLAN filtering is a global setting"); return false; @@ -655,13 +659,13 @@ restore: */ bool dsa_port_skip_vlan_configuration(struct dsa_port *dp) { + struct net_device *br = dsa_port_bridge_dev_get(dp); struct dsa_switch *ds = dp->ds; - if (!dp->bridge_dev) + if (!br) return false; - return (!ds->configure_vlan_while_not_filtering && - !br_vlan_enabled(dp->bridge_dev)); + return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br); } int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock) @@ -981,8 +985,11 @@ static void dsa_port_phylink_validate(struct phylink_config *config, struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); struct dsa_switch *ds = dp->ds; - if (!ds->ops->phylink_validate) + if (!ds->ops->phylink_validate) { + if (config->mac_capabilities) + phylink_generic_validate(config, supported, state); return; + } ds->ops->phylink_validate(ds, dp->index, supported, state); } @@ -1072,7 +1079,7 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config, speed, duplex, tx_pause, rx_pause); } -const struct phylink_mac_ops dsa_port_phylink_mac_ops = { +static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { .validate = dsa_port_phylink_validate, .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state, .mac_config = dsa_port_phylink_mac_config, @@ -1081,6 +1088,36 @@ const struct phylink_mac_ops dsa_port_phylink_mac_ops = { .mac_link_up = 
dsa_port_phylink_mac_link_up, }; +int dsa_port_phylink_create(struct dsa_port *dp) +{ + struct dsa_switch *ds = dp->ds; + phy_interface_t mode; + int err; + + err = of_get_phy_mode(dp->dn, &mode); + if (err) + mode = PHY_INTERFACE_MODE_NA; + + /* Presence of phylink_mac_link_state or phylink_mac_an_restart is + * an indicator of a legacy phylink driver. + */ + if (ds->ops->phylink_mac_link_state || + ds->ops->phylink_mac_an_restart) + dp->pl_config.legacy_pre_march2020 = true; + + if (ds->ops->phylink_get_caps) + ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config); + + dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), + mode, &dsa_port_phylink_mac_ops); + if (IS_ERR(dp->pl)) { + pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); + return PTR_ERR(dp->pl); + } + + return 0; +} + static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable) { struct dsa_switch *ds = dp->ds; @@ -1157,27 +1194,15 @@ static int dsa_port_phylink_register(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; struct device_node *port_dn = dp->dn; - phy_interface_t mode; int err; - err = of_get_phy_mode(port_dn, &mode); - if (err) - mode = PHY_INTERFACE_MODE_NA; - dp->pl_config.dev = ds->dev; dp->pl_config.type = PHYLINK_DEV; dp->pl_config.pcs_poll = ds->pcs_poll; - if (ds->ops->phylink_get_interfaces) - ds->ops->phylink_get_interfaces(ds, dp->index, - dp->pl_config.supported_interfaces); - - dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), - mode, &dsa_port_phylink_mac_ops); - if (IS_ERR(dp->pl)) { - pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); - return PTR_ERR(dp->pl); - } + err = dsa_port_phylink_create(dp); + if (err) + return err; err = phylink_of_phy_connect(dp->pl, port_dn, 0); if (err && err != -ENODEV) { diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ad61f6bc8886..88f7b8686dac 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -289,14 +289,14 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx, ret = dsa_port_set_state(dp, attr->u.stp_state, true); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - if (!dsa_port_offloads_bridge(dp, attr->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) return -EOPNOTSUPP; ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering, extack); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - if (!dsa_port_offloads_bridge(dp, attr->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) return -EOPNOTSUPP; ret = dsa_port_ageing_time(dp, attr->u.ageing_time); @@ -363,7 +363,7 @@ static int dsa_slave_vlan_add(struct net_device *dev, /* Deny adding a bridge VLAN when there is already an 802.1Q upper with * the same VID. 
*/ - if (br_vlan_enabled(dp->bridge_dev)) { + if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) { rcu_read_lock(); err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan); rcu_read_unlock(); @@ -409,7 +409,7 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx, err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); break; case SWITCHDEV_OBJ_ID_HOST_MDB: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); @@ -421,13 +421,13 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx, err = dsa_slave_vlan_add(dev, obj, extack); break; case SWITCHDEV_OBJ_ID_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj)); break; case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_add_ring_role(dp, @@ -483,7 +483,7 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx, err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); break; case SWITCHDEV_OBJ_ID_HOST_MDB: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); @@ -495,13 +495,13 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx, err = dsa_slave_vlan_del(dev, obj); break; case SWITCHDEV_OBJ_ID_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj)); break; case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: - if (!dsa_port_offloads_bridge(dp, obj->orig_dev)) + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) return -EOPNOTSUPP; err = dsa_port_mrp_del_ring_role(dp, @@ -1564,7 +1564,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp) if (!dp->ds->mtu_enforcement_ingress) return; - if (!dp->bridge_dev) + if (!dp->bridge) return; INIT_LIST_HEAD(&hw_port_list); @@ -1580,7 +1580,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp) if (other_dp->type != DSA_PORT_TYPE_USER) continue; - if (other_dp->bridge_dev != dp->bridge_dev) + if (!dsa_port_bridge_same(dp, other_dp)) continue; if (!other_dp->ds->mtu_enforcement_ingress) @@ -1851,14 +1851,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) struct dsa_port *dp = dsa_slave_to_port(slave_dev); struct device_node *port_dn = dp->dn; struct dsa_switch *ds = dp->ds; - phy_interface_t mode; u32 phy_flags = 0; int ret; - ret = of_get_phy_mode(port_dn, &mode); - if (ret) - mode = PHY_INTERFACE_MODE_NA; - dp->pl_config.dev = &slave_dev->dev; dp->pl_config.type = PHYLINK_NETDEV; @@ -1871,17 +1866,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) dp->pl_config.poll_fixed_state = true; } - if (ds->ops->phylink_get_interfaces) - ds->ops->phylink_get_interfaces(ds, dp->index, - dp->pl_config.supported_interfaces); - - dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, - &dsa_port_phylink_mac_ops); - if (IS_ERR(dp->pl)) { - netdev_err(slave_dev, - "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); - return PTR_ERR(dp->pl); - } + ret = dsa_port_phylink_create(dp); + if (ret) + return ret; if 
(ds->ops->get_phy_flags) phy_flags = ds->ops->get_phy_flags(ds, dp->index); @@ -2233,7 +2220,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { struct netlink_ext_ack *ext_ack; - struct net_device *slave; + struct net_device *slave, *br; struct dsa_port *dp; ext_ack = netdev_notifier_info_to_extack(&info->info); @@ -2246,11 +2233,12 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev, return NOTIFY_DONE; dp = dsa_slave_to_port(slave); - if (!dp->bridge_dev) + br = dsa_port_bridge_dev_get(dp); + if (!br) return NOTIFY_DONE; /* Deny enslaving a VLAN device into a VLAN-aware bridge */ - if (br_vlan_enabled(dp->bridge_dev) && + if (br_vlan_enabled(br) && netif_is_bridge_master(info->upper_dev) && info->linking) { NL_SET_ERR_MSG_MOD(ext_ack, "Cannot enslave VLAN device into VLAN aware bridge"); @@ -2265,7 +2253,7 @@ dsa_slave_check_8021q_upper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { struct dsa_port *dp = dsa_slave_to_port(dev); - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); struct bridge_vlan_info br_info; struct netlink_ext_ack *extack; int err = NOTIFY_DONE; @@ -2462,7 +2450,7 @@ static bool dsa_foreign_dev_check(const struct net_device *dev, struct dsa_switch_tree *dst = dp->ds->dst; if (netif_is_bridge_master(foreign_dev)) - return !dsa_tree_offloads_bridge(dst, foreign_dev); + return !dsa_tree_offloads_bridge_dev(dst, foreign_dev); if (netif_is_bridge_port(foreign_dev)) return !dsa_tree_offloads_bridge_port(dst, foreign_dev); diff --git a/net/dsa/switch.c b/net/dsa/switch.c index bb155a16d454..393f2d8a860a 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -95,7 +95,8 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds, if (!ds->ops->port_bridge_join) return -EOPNOTSUPP; - err = ds->ops->port_bridge_join(ds, info->port, info->br); + err = ds->ops->port_bridge_join(ds, info->port, info->bridge, + &info->tx_fwd_offload); if (err) return err; } @@ -104,7 +105,7 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds, ds->ops->crosschip_bridge_join) { err = ds->ops->crosschip_bridge_join(ds, info->tree_index, info->sw_index, - info->port, info->br); + info->port, info->bridge); if (err) return err; } @@ -124,19 +125,20 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, if (dst->index == info->tree_index && ds->index == info->sw_index && ds->ops->port_bridge_leave) - ds->ops->port_bridge_leave(ds, info->port, info->br); + ds->ops->port_bridge_leave(ds, info->port, info->bridge); if ((dst->index != info->tree_index || ds->index != info->sw_index) && ds->ops->crosschip_bridge_leave) ds->ops->crosschip_bridge_leave(ds, info->tree_index, info->sw_index, info->port, - info->br); + info->bridge); - if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) { + if (ds->needs_standalone_vlan_filtering && + !br_vlan_enabled(info->bridge.dev)) { change_vlan_filtering = true; vlan_filtering = true; } else if (!ds->needs_standalone_vlan_filtering && - br_vlan_enabled(info->br)) { + br_vlan_enabled(info->bridge.dev)) { change_vlan_filtering = true; vlan_filtering = false; } @@ -151,11 +153,9 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, */ if (change_vlan_filtering && ds->vlan_filtering_is_global) { dsa_switch_for_each_port(dp, ds) { - struct net_device *bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); - bridge_dev = dp->bridge_dev; - - if (bridge_dev && br_vlan_enabled(bridge_dev)) { + if 
(br && br_vlan_enabled(br)) { change_vlan_filtering = false; break; } @@ -647,6 +647,60 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds, return 0; } +/* We use the same cross-chip notifiers to inform both the tagger side, as well + * as the switch side, of connection and disconnection events. + * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the + * switch side doesn't support connecting to this tagger, and therefore, the + * fact that we don't disconnect the tagger side doesn't constitute a memory + * leak: the tagger will still operate with persistent per-switch memory, just + * with the switch side unconnected to it. What does constitute a hard error is + * when the switch side supports connecting but fails. + */ +static int +dsa_switch_connect_tag_proto(struct dsa_switch *ds, + struct dsa_notifier_tag_proto_info *info) +{ + const struct dsa_device_ops *tag_ops = info->tag_ops; + int err; + + /* Notify the new tagger about the connection to this switch */ + if (tag_ops->connect) { + err = tag_ops->connect(ds); + if (err) + return err; + } + + if (!ds->ops->connect_tag_protocol) + return -EOPNOTSUPP; + + /* Notify the switch about the connection to the new tagger */ + err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); + if (err) { + /* Revert the new tagger's connection to this tree */ + if (tag_ops->disconnect) + tag_ops->disconnect(ds); + return err; + } + + return 0; +} + +static int +dsa_switch_disconnect_tag_proto(struct dsa_switch *ds, + struct dsa_notifier_tag_proto_info *info) +{ + const struct dsa_device_ops *tag_ops = info->tag_ops; + + /* Notify the tagger about the disconnection from this switch */ + if (tag_ops->disconnect && ds->tagger_data) + tag_ops->disconnect(ds); + + /* No need to notify the switch, since it shouldn't have any + * resources to tear down + */ + return 0; +} + static int dsa_switch_mrp_add(struct dsa_switch *ds, struct dsa_notifier_mrp_info *info) { @@ -766,6 +820,12 @@ static int dsa_switch_event(struct notifier_block *nb, case DSA_NOTIFIER_TAG_PROTO: err = dsa_switch_change_tag_proto(ds, info); break; + case DSA_NOTIFIER_TAG_PROTO_CONNECT: + err = dsa_switch_connect_tag_proto(ds, info); + break; + case DSA_NOTIFIER_TAG_PROTO_DISCONNECT: + err = dsa_switch_disconnect_tag_proto(ds, info); + break; case DSA_NOTIFIER_MRP_ADD: err = dsa_switch_mrp_add(ds, info); break; diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 72cac2c0af7b..27712a81c967 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -67,10 +67,12 @@ #define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \ DSA_8021Q_PORT_MASK) -u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num) +u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num) { - /* The VBID value of 0 is reserved for precise TX */ - return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num + 1); + /* The VBID value of 0 is reserved for precise TX, but it is also + * reserved/invalid for the bridge_num, so all is well. 
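+	 * Valid bridge numbers are handed out starting from 1 (a num of 0
+	 * means no number was allocated), so bridge_num now maps 1:1 onto
+	 * a nonzero VBID and VBID 0 keeps denoting precise TX.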
+ */ + return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num); } EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid); @@ -335,7 +337,7 @@ dsa_port_tag_8021q_bridge_match(struct dsa_port *dp, return false; if (dsa_port_is_user(dp)) - return dp->bridge_dev == info->br; + return dsa_port_offloads_bridge(dp, &info->bridge); return false; } @@ -408,10 +410,9 @@ int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, } int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) + struct dsa_bridge bridge) { - u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); + u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid, true); @@ -419,10 +420,9 @@ int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_offload); void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) + struct dsa_bridge bridge) { - u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); + u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num); dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid, true); } diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index b3da4b2ea11c..8abf39dcac64 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -132,6 +132,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev, u8 *dsa_header; if (skb->offload_fwd_mark) { + unsigned int bridge_num = dsa_port_bridge_num_get(dp); struct dsa_switch_tree *dst = dp->ds->dst; cmd = DSA_CMD_FORWARD; @@ -140,7 +141,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev, * packets on behalf of a virtual switch device with an index * past the physical switches. */ - tag_dev = dst->last_switch + 1 + dp->bridge_num; + tag_dev = dst->last_switch + bridge_num; tag_port = 0; } else { cmd = DSA_CMD_FROM_CPU; @@ -165,7 +166,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev, dsa_header[2] &= ~0x10; } } else { - struct net_device *br = dp->bridge_dev; + struct net_device *br = dsa_port_bridge_dev_get(dp); u16 vid; vid = br ? 
MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE; diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index de1c849a0a70..4ba460c5a880 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -12,7 +12,7 @@ static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp, u64 *vlan_tci, u64 *tag_type) { - struct net_device *br = READ_ONCE(dp->bridge_dev); + struct net_device *br = dsa_port_bridge_dev_get(dp); struct vlan_ethhdr *hdr; u16 proto, tci; diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index a1919ea5e828..68982b2789a5 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -12,25 +12,39 @@ #include <linux/dsa/ocelot.h> #include "dsa_priv.h" +struct ocelot_8021q_tagger_private { + struct ocelot_8021q_tagger_data data; /* Must be first */ + struct kthread_worker *xmit_worker; +}; + static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp, struct sk_buff *skb) { + struct ocelot_8021q_tagger_private *priv = dp->ds->tagger_data; + struct ocelot_8021q_tagger_data *data = &priv->data; + void (*xmit_work_fn)(struct kthread_work *work); struct felix_deferred_xmit_work *xmit_work; - struct felix_port *felix_port = dp->priv; + struct kthread_worker *xmit_worker; + + xmit_work_fn = data->xmit_work_fn; + xmit_worker = priv->xmit_worker; + + if (!xmit_work_fn || !xmit_worker) + return NULL; xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC); if (!xmit_work) return NULL; /* Calls felix_port_deferred_xmit in felix.c */ - kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn); + kthread_init_work(&xmit_work->work, xmit_work_fn); /* Increase refcount so the kfree_skb in dsa_slave_xmit * won't really free the packet. */ xmit_work->dp = dp; xmit_work->skb = skb_get(skb); - kthread_queue_work(felix_port->xmit_worker, &xmit_work->work); + kthread_queue_work(xmit_worker, &xmit_work->work); return NULL; } @@ -67,11 +81,43 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, return skb; } +static void ocelot_disconnect(struct dsa_switch *ds) +{ + struct ocelot_8021q_tagger_private *priv = ds->tagger_data; + + kthread_destroy_worker(priv->xmit_worker); + kfree(priv); + ds->tagger_data = NULL; +} + +static int ocelot_connect(struct dsa_switch *ds) +{ + struct ocelot_8021q_tagger_private *priv; + int err; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->xmit_worker = kthread_create_worker(0, "felix_xmit"); + if (IS_ERR(priv->xmit_worker)) { + err = PTR_ERR(priv->xmit_worker); + kfree(priv); + return err; + } + + ds->tagger_data = priv; + + return 0; +} + static const struct dsa_device_ops ocelot_8021q_netdev_ops = { .name = "ocelot-8021q", .proto = DSA_TAG_PROTO_OCELOT_8021Q, .xmit = ocelot_xmit, .rcv = ocelot_rcv, + .connect = ocelot_connect, + .disconnect = ocelot_disconnect, .needed_headroom = VLAN_HLEN, .promisc_on_master = true, }; diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 262c8833a910..72d5e0ef8dcf 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -4,7 +4,6 @@ #include <linux/if_vlan.h> #include <linux/dsa/sja1105.h> #include <linux/dsa/8021q.h> -#include <linux/skbuff.h> #include <linux/packing.h> #include "dsa_priv.h" @@ -54,11 +53,25 @@ #define SJA1110_TX_TRAILER_LEN 4 #define SJA1110_MAX_PADDING_LEN 15 -enum sja1110_meta_tstamp { - SJA1110_META_TSTAMP_TX = 0, - SJA1110_META_TSTAMP_RX = 1, +#define SJA1105_HWTS_RX_EN 0 + +struct sja1105_tagger_private { + struct sja1105_tagger_data data; /* Must be first */ + unsigned long state; + /* 
Protects concurrent access to the meta state machine + * from taggers running on multiple ports on SMP systems + */ + spinlock_t meta_lock; + struct sk_buff *stampable_skb; + struct kthread_worker *xmit_worker; }; +static struct sja1105_tagger_private * +sja1105_tagger_private(struct dsa_switch *ds) +{ + return ds->tagger_data; +} + /* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */ static inline bool sja1105_is_link_local(const struct sk_buff *skb) { @@ -125,16 +138,30 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb) static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp, struct sk_buff *skb) { - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds); + struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds); + void (*xmit_work_fn)(struct kthread_work *work); + struct sja1105_deferred_xmit_work *xmit_work; + struct kthread_worker *xmit_worker; - if (!dsa_port_is_sja1105(dp)) - return skb; + xmit_work_fn = tagger_data->xmit_work_fn; + xmit_worker = priv->xmit_worker; + + if (!xmit_work_fn || !xmit_worker) + return NULL; + + xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC); + if (!xmit_work) + return NULL; + kthread_init_work(&xmit_work->work, xmit_work_fn); /* Increase refcount so the kfree_skb in dsa_slave_xmit * won't really free the packet. */ - skb_queue_tail(&sp->xmit_queue, skb_get(skb)); - kthread_queue_work(sp->xmit_worker, &sp->xmit_work); + xmit_work->dp = dp; + xmit_work->skb = skb_get(skb); + + kthread_queue_work(xmit_worker, &xmit_work->work); return NULL; } @@ -159,14 +186,16 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp) * need to find it. */ dsa_switch_for_each_port(other_dp, ds) { - if (!other_dp->bridge_dev) + struct net_device *br = dsa_port_bridge_dev_get(other_dp); + + if (!br) continue; /* Error is returned only if CONFIG_BRIDGE_VLAN_FILTERING, * which seems pointless to handle, as our port cannot become * VLAN-aware in that case. */ - br_vlan_get_proto(other_dp->bridge_dev, &proto); + br_vlan_get_proto(br, &proto); return proto; } @@ -180,7 +209,8 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dsa_port *dp = dsa_slave_to_port(netdev); - struct net_device *br = dp->bridge_dev; + unsigned int bridge_num = dsa_port_bridge_num_get(dp); + struct net_device *br = dsa_port_bridge_dev_get(dp); u16 tx_vid; /* If the port is under a VLAN-aware bridge, just slide the @@ -196,7 +226,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb, * TX VLAN that targets the bridge's entire broadcast domain, * instead of just the specific port. */ - tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(dp->bridge_num); + tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid); } @@ -352,32 +382,32 @@ static struct sk_buff */ if (is_link_local) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_private *priv; + struct dsa_switch *ds = dp->ds; - if (unlikely(!dsa_port_is_sja1105(dp))) - return skb; + priv = sja1105_tagger_private(ds); - if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) + if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) /* Do normal processing. */ return skb; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->meta_lock); /* Was this a link-local frame instead of the meta * that we were expecting? 
*/ - if (sp->data->stampable_skb) { - dev_err_ratelimited(dp->ds->dev, + if (priv->stampable_skb) { + dev_err_ratelimited(ds->dev, "Expected meta frame, is %12llx " "in the DSA master multicast filter?\n", SJA1105_META_DMAC); - kfree_skb(sp->data->stampable_skb); + kfree_skb(priv->stampable_skb); } /* Hold a reference to avoid dsa_switch_rcv * from freeing the skb. */ - sp->data->stampable_skb = skb_get(skb); - spin_unlock(&sp->data->meta_lock); + priv->stampable_skb = skb_get(skb); + spin_unlock(&priv->meta_lock); /* Tell DSA we got nothing */ return NULL; @@ -390,37 +420,37 @@ static struct sk_buff */ } else if (is_meta) { struct dsa_port *dp = dsa_slave_to_port(skb->dev); - struct sja1105_port *sp = dp->priv; + struct sja1105_tagger_private *priv; + struct dsa_switch *ds = dp->ds; struct sk_buff *stampable_skb; - if (unlikely(!dsa_port_is_sja1105(dp))) - return skb; + priv = sja1105_tagger_private(ds); /* Drop the meta frame if we're not in the right state * to process it. */ - if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state)) + if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state)) return NULL; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->meta_lock); - stampable_skb = sp->data->stampable_skb; - sp->data->stampable_skb = NULL; + stampable_skb = priv->stampable_skb; + priv->stampable_skb = NULL; /* Was this a meta frame instead of the link-local * that we were expecting? */ if (!stampable_skb) { - dev_err_ratelimited(dp->ds->dev, + dev_err_ratelimited(ds->dev, "Unexpected meta frame\n"); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); return NULL; } if (stampable_skb->dev != skb->dev) { - dev_err_ratelimited(dp->ds->dev, + dev_err_ratelimited(ds->dev, "Meta frame on wrong port\n"); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); return NULL; } @@ -431,12 +461,36 @@ static struct sk_buff skb = stampable_skb; sja1105_transfer_meta(skb, meta); - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->meta_lock); } return skb; } +static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds) +{ + struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); + + return test_bit(SJA1105_HWTS_RX_EN, &priv->state); +} + +static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on) +{ + struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); + + if (on) + set_bit(SJA1105_HWTS_RX_EN, &priv->state); + else + clear_bit(SJA1105_HWTS_RX_EN, &priv->state); + + /* Initialize the meta state machine to a known state */ + if (!priv->stampable_skb) + return; + + kfree_skb(priv->stampable_skb); + priv->stampable_skb = NULL; +} + static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb) { u16 tpid = ntohs(eth_hdr(skb)->h_proto); @@ -523,48 +577,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, is_meta); } -static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, - u8 ts_id, enum sja1110_meta_tstamp dir, - u64 tstamp) -{ - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; - struct dsa_port *dp = dsa_to_port(ds, port); - struct skb_shared_hwtstamps shwt = {0}; - struct sja1105_port *sp = dp->priv; - - if (!dsa_port_is_sja1105(dp)) - return; - - /* We don't care about RX timestamps on the CPU port */ - if (dir == SJA1110_META_TSTAMP_RX) - return; - - spin_lock(&sp->data->skb_txtstamp_queue.lock); - - skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) { - if (SJA1105_SKB_CB(skb)->ts_id != ts_id) - continue; - - __skb_unlink(skb, &sp->data->skb_txtstamp_queue); - skb_match = skb; - - break; - } 
- - spin_unlock(&sp->data->skb_txtstamp_queue.lock); - - if (WARN_ON(!skb_match)) - return; - - shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); - skb_complete_tx_timestamp(skb_match, &shwt); -} - static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) { u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN; int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header); int n_ts = SJA1110_RX_HEADER_N_TS(rx_header); + struct sja1105_tagger_data *tagger_data; struct net_device *master = skb->dev; struct dsa_port *cpu_dp; struct dsa_switch *ds; @@ -578,6 +596,10 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) return NULL; } + tagger_data = sja1105_tagger_data(ds); + if (!tagger_data->meta_tstamp_handler) + return NULL; + for (i = 0; i <= n_ts; i++) { u8 ts_id, source_port, dir; u64 tstamp; @@ -587,8 +609,8 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) dir = (buf[1] & BIT(3)) >> 3; tstamp = be64_to_cpu(*(__be64 *)(buf + 2)); - sja1110_process_meta_tstamp(ds, source_port, ts_id, dir, - tstamp); + tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir, + tstamp); buf += SJA1110_META_TSTAMP_SIZE; } @@ -719,11 +741,53 @@ static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto, *proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1]; } +static void sja1105_disconnect(struct dsa_switch *ds) +{ + struct sja1105_tagger_private *priv = ds->tagger_data; + + kthread_destroy_worker(priv->xmit_worker); + kfree(priv); + ds->tagger_data = NULL; +} + +static int sja1105_connect(struct dsa_switch *ds) +{ + struct sja1105_tagger_data *tagger_data; + struct sja1105_tagger_private *priv; + struct kthread_worker *xmit_worker; + int err; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->meta_lock); + + xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit", + ds->dst->index, ds->index); + if (IS_ERR(xmit_worker)) { + err = PTR_ERR(xmit_worker); + kfree(priv); + return err; + } + + priv->xmit_worker = xmit_worker; + /* Export functions for switch driver use */ + tagger_data = &priv->data; + tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state; + tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state; + ds->tagger_data = priv; + + return 0; +} + static const struct dsa_device_ops sja1105_netdev_ops = { .name = "sja1105", .proto = DSA_TAG_PROTO_SJA1105, .xmit = sja1105_xmit, .rcv = sja1105_rcv, + .connect = sja1105_connect, + .disconnect = sja1105_disconnect, .needed_headroom = VLAN_HLEN, .flow_dissect = sja1105_flow_dissect, .promisc_on_master = true, @@ -737,6 +801,8 @@ static const struct dsa_device_ops sja1110_netdev_ops = { .proto = DSA_TAG_PROTO_SJA1110, .xmit = sja1110_xmit, .rcv = sja1110_rcv, + .connect = sja1105_connect, + .disconnect = sja1105_disconnect, .flow_dissect = sja1110_flow_dissect, .needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN, .needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN, diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index 63560bbb7d1f..920aac02fe39 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -96,7 +96,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); out_dev_put: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } @@ -353,7 +353,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); out_dev_put: - dev_put(dev); + 
ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c index 6a070dc8e4b0..403158862011 100644 --- a/net/ethtool/channels.c +++ b/net/ethtool/channels.c @@ -219,6 +219,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c index 46776ea42a92..487bdf345541 100644 --- a/net/ethtool/coalesce.c +++ b/net/ethtool/coalesce.c @@ -336,6 +336,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c index f99912d7957e..d73888c7d19c 100644 --- a/net/ethtool/debug.c +++ b/net/ethtool/debug.c @@ -123,6 +123,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c index e10bfcc07853..45c42b2d5f17 100644 --- a/net/ethtool/eee.c +++ b/net/ethtool/eee.c @@ -185,6 +185,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/features.c b/net/ethtool/features.c index 2e7331b23996..55d449a2d3fc 100644 --- a/net/ethtool/features.c +++ b/net/ethtool/features.c @@ -283,6 +283,6 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) out_rtnl: rtnl_unlock(); - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c index 8738dafd5417..9f5a134e2e01 100644 --- a/net/ethtool/fec.c +++ b/net/ethtool/fec.c @@ -305,6 +305,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index fa8aa5ec19ba..9a113d893521 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1989,6 +1989,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) struct ethtool_value id; static bool busy; const struct ethtool_ops *ops = dev->ethtool_ops; + netdevice_tracker dev_tracker; int rc; if (!ops->set_phys_id) @@ -2008,7 +2009,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) * removal of the device. 
*/ busy = true; - dev_hold(dev); + dev_hold_track(dev, &dev_tracker, GFP_KERNEL); rtnl_unlock(); if (rc == 0) { @@ -2032,7 +2033,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) } rtnl_lock(); - dev_put(dev); + dev_put_track(dev, &dev_tracker); busy = false; (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE); diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c index b91839870efc..efa0f7f48836 100644 --- a/net/ethtool/linkinfo.c +++ b/net/ethtool/linkinfo.c @@ -149,6 +149,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index f9eda596f301..99b29b4fe947 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -358,6 +358,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/module.c b/net/ethtool/module.c index bc2cef11bbda..898ed436b9e4 100644 --- a/net/ethtool/module.c +++ b/net/ethtool/module.c @@ -175,6 +175,6 @@ out_ops: ethnl_ops_complete(dev); out_rtnl: rtnl_unlock(); - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 38b44c0291b1..f09c62302a9a 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev) if (dev->dev.parent) pm_runtime_get_sync(dev->dev.parent); - if (!netif_device_present(dev)) { + if (!netif_device_present(dev) || + dev->reg_state == NETREG_UNREGISTERING) { ret = -ENODEV; goto err; } @@ -141,6 +142,8 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, } req_info->dev = dev; + if (dev) + netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL); req_info->flags = flags; return 0; } @@ -399,7 +402,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) ops->cleanup_data(reply_data); genlmsg_end(rskb, reply_payload); - dev_put(req_info->dev); + dev_put_track(req_info->dev, &req_info->dev_tracker); kfree(reply_data); kfree(req_info); return genlmsg_reply(rskb, info); @@ -411,7 +414,7 @@ err_cleanup: if (ops->cleanup_data) ops->cleanup_data(reply_data); err_dev: - dev_put(req_info->dev); + dev_put_track(req_info->dev, &req_info->dev_tracker); kfree(reply_data); kfree(req_info); return ret; @@ -547,7 +550,7 @@ static int ethnl_default_start(struct netlink_callback *cb) * same parser as for non-dump (doit) requests is used, it * would take reference to the device if it finds one */ - dev_put(req_info->dev); + dev_put_track(req_info->dev, &req_info->dev_tracker); req_info->dev = NULL; } if (ret < 0) @@ -624,6 +627,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, } req_info->dev = dev; + netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL); req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS; ethnl_init_reply_data(reply_data, ops, dev); diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 490598e5eedd..75856db299e9 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -222,6 +222,7 @@ static inline unsigned int ethnl_reply_header_size(void) /** * struct ethnl_req_info - base type of request information for GET requests * @dev: network device the request is for (may be null) + * @dev_tracker: refcount tracker for @dev reference * @flags: request flags common for all request types * * This is a common base for request specific structures holding data from @@ 
-230,9 +231,15 @@ static inline unsigned int ethnl_reply_header_size(void) */ struct ethnl_req_info { struct net_device *dev; + netdevice_tracker dev_tracker; u32 flags; }; +static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info) +{ + dev_put_track(req_info->dev, &req_info->dev_tracker); +} + /** * struct ethnl_reply_data - base type of reply data for GET requests * @dev: device for current reply message; in single shot requests it is diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c index ee1e5806bc93..a8c113d244db 100644 --- a/net/ethtool/pause.c +++ b/net/ethtool/pause.c @@ -181,6 +181,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c index fc9f3be23a19..4c7bfa81e4ab 100644 --- a/net/ethtool/privflags.c +++ b/net/ethtool/privflags.c @@ -196,6 +196,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 450b8866373d..c1d5f5e0fdc9 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -196,6 +196,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c index e7f2ee0d2471..efde33536687 100644 --- a/net/ethtool/tunnels.c +++ b/net/ethtool/tunnels.c @@ -195,7 +195,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info) if (ret) goto err_free_msg; rtnl_unlock(); - dev_put(req_info.dev); + ethnl_parse_header_dev_put(&req_info); genlmsg_end(rskb, reply_payload); return genlmsg_reply(rskb, info); @@ -204,7 +204,7 @@ err_free_msg: nlmsg_free(rskb); err_unlock_rtnl: rtnl_unlock(); - dev_put(req_info.dev); + ethnl_parse_header_dev_put(&req_info); return ret; } @@ -230,7 +230,7 @@ int ethnl_tunnel_info_start(struct netlink_callback *cb) sock_net(cb->skb->sk), cb->extack, false); if (ctx->req_info.dev) { - dev_put(ctx->req_info.dev); + ethnl_parse_header_dev_put(&ctx->req_info); ctx->req_info.dev = NULL; } diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c index ada7df2331d2..88f435e76481 100644 --- a/net/ethtool/wol.c +++ b/net/ethtool/wol.c @@ -165,6 +165,6 @@ out_ops: out_rtnl: rtnl_unlock(); out_dev: - dev_put(dev); + ethnl_parse_header_dev_put(&req_info); return ret; } diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 4bb9401b0a3f..de610cb83694 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -169,7 +169,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog) t = bpf_tcp_congestion_ops.type; m = &btf_type_member(t)[midx]; - return btf_member_bit_offset(t, m) / 8; + return __btf_member_bit_offset(t, m) / 8; } static const struct bpf_func_proto * @@ -246,7 +246,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t, utcp_ca = (const struct tcp_congestion_ops *)udata; tcp_ca = (struct tcp_congestion_ops *)kdata; - moff = btf_member_bit_offset(t, member) / 8; + moff = __btf_member_bit_offset(t, member) / 8; switch (moff) { case offsetof(struct tcp_congestion_ops, flags): if (utcp_ca->flags & ~TCP_CONG_MASK) @@ -276,7 +276,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t, static int bpf_tcp_ca_check_member(const struct btf_type *t, const struct btf_member *member) { - if (is_unsupported(btf_member_bit_offset(t, member) / 8)) + if (is_unsupported(__btf_member_bit_offset(t, member) / 8)) return -ENOTSUPP; return 0; } 
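From net/ipv4/devinet.c onward the hunks follow one mechanical pattern: bare dev_hold()/dev_put() pairs become dev_hold_track()/dev_put_track(), carrying a netdevice_tracker embedded in the owning structure so that, with CONFIG_NET_DEV_REFCNT_TRACKER, a leaked reference can be attributed to the holder that took it. The same scheme covers netns references (netns_tracker_alloc()/put_net_track() in the l2tp_debugfs hunk). A sketch of what the tracked helpers amount to, assuming they simply pair the classic refcount operation with tracker bookkeeping (the exact bodies live in include/linux/netdevice.h):

	/* Sketch, not the authoritative implementation. All helpers must
	 * tolerate dev == NULL, since call sites such as
	 * dev_put_track(req_info->dev, ...) may pass a null device.
	 */
	static inline void dev_hold_track(struct net_device *dev,
					  netdevice_tracker *tracker,
					  gfp_t gfp)
	{
		if (dev) {
			dev_hold(dev);
			netdev_tracker_alloc(dev, tracker, gfp);
		}
	}

	static inline void dev_put_track(struct net_device *dev,
					 netdevice_tracker *tracker)
	{
		if (dev) {
			netdev_tracker_free(dev, tracker);
			dev_put(dev);
		}
	}

	static inline void dev_replace_track(struct net_device *odev,
					     struct net_device *ndev,
					     netdevice_tracker *tracker,
					     gfp_t gfp)
	{
		/* Moves one held reference from odev to ndev in a single
		 * step; rt_flush_dev() and rt6_uncached_list_flush_dev()
		 * below use it to retarget dst->dev at blackhole_netdev.
		 */
		if (odev)
			netdev_tracker_free(odev, tracker);
		dev_hold(ndev);
		dev_put(odev);
		if (ndev)
			netdev_tracker_alloc(ndev, tracker, gfp);
	}

This is also why ethnl_parse_header_dev_put(), added in the net/ethtool/netlink.h hunk above, could mechanically replace every dev_put(dev) at the ethtool out_dev labels: it is simply dev_put_track() on req_info->dev and its tracker.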
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 323e622ff9b7..fba2bffd65f7 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -243,7 +243,7 @@ void in_dev_finish_destroy(struct in_device *idev) #ifdef NET_REFCNT_DEBUG pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL"); #endif - dev_put(dev); + dev_put_track(dev, &idev->dev_tracker); if (!idev->dead) pr_err("Freeing alive in_device %p\n", idev); else @@ -271,7 +271,7 @@ static struct in_device *inetdev_init(struct net_device *dev) if (IPV4_DEVCONF(in_dev->cnf, FORWARDING)) dev_disable_lro(dev); /* Reference in_dev->dev */ - dev_hold(dev); + dev_hold_track(dev, &in_dev->dev_tracker, GFP_KERNEL); /* Account for reference dev->ip_ptr (below) */ refcount_set(&in_dev->refcnt, 1); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 9fe13e4f5d08..4d61ddd8a0ec 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net) int error; #ifdef CONFIG_IP_ROUTE_CLASSID - net->ipv4.fib_num_tclassid_users = 0; + atomic_set(&net->ipv4.fib_num_tclassid_users, 0); #endif error = ip_fib_net_init(net); if (error < 0) diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index ce54a30c2ef1..d279cb8ac158 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule, } INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg) { struct fib_result *result = (struct fib_result *) arg->result; @@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, if (tb[FRA_FLOW]) { rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); if (rule4->tclassid) - net->ipv4.fib_num_tclassid_users++; + atomic_inc(&net->ipv4.fib_num_tclassid_users); } #endif @@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule) #ifdef CONFIG_IP_ROUTE_CLASSID if (((struct fib4_rule *)rule)->tclassid) - net->ipv4.fib_num_tclassid_users--; + atomic_dec(&net->ipv4.fib_num_tclassid_users); #endif net->ipv4.fib_has_custom_rules = true; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 3364cb9c67e0..3cad543dc747 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -208,7 +208,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) void fib_nh_common_release(struct fib_nh_common *nhc) { - dev_put(nhc->nhc_dev); + dev_put_track(nhc->nhc_dev, &nhc->nhc_dev_tracker); lwtstate_put(nhc->nhc_lwtstate); rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output); rt_fibinfo_free(&nhc->nhc_rth_input); @@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh) { #ifdef CONFIG_IP_ROUTE_CLASSID if (fib_nh->nh_tclassid) - net->ipv4.fib_num_tclassid_users--; + atomic_dec(&net->ipv4.fib_num_tclassid_users); #endif fib_nh_common_release(&fib_nh->nh_common); } @@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh, #ifdef CONFIG_IP_ROUTE_CLASSID nh->nh_tclassid = cfg->fc_flow; if (nh->nh_tclassid) - net->ipv4.fib_num_tclassid_users++; + atomic_inc(&net->ipv4.fib_num_tclassid_users); #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH nh->fib_nh_weight = nh_weight; @@ -1006,7 +1006,7 @@ static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh, err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack); if (!err) { nh->fib_nh_dev = fib6_nh.fib_nh_dev; - dev_hold(nh->fib_nh_dev); + dev_hold_track(nh->fib_nh_dev, 
&nh->fib_nh_dev_tracker, GFP_KERNEL); nh->fib_nh_oif = nh->fib_nh_dev->ifindex; nh->fib_nh_scope = RT_SCOPE_LINK; @@ -1090,7 +1090,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, if (!netif_carrier_ok(dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; nh->fib_nh_dev = dev; - dev_hold(dev); + dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); nh->fib_nh_scope = RT_SCOPE_LINK; return 0; } @@ -1144,7 +1144,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, "No egress device for nexthop gateway"); goto out; } - dev_hold(dev); + dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); if (!netif_carrier_ok(dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN; @@ -1178,7 +1178,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh, } nh->fib_nh_dev = in_dev->dev; - dev_hold(nh->fib_nh_dev); + dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); nh->fib_nh_scope = RT_SCOPE_HOST; if (!netif_carrier_ok(nh->fib_nh_dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; @@ -1508,6 +1508,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg, err = -ENODEV; if (!nh->fib_nh_dev) goto failure; + netdev_tracker_alloc(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, + GFP_KERNEL); } else { int linkdown = 0; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 23da67a3fc06..fc2a985f6064 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req, sk_node_init(&nreq_sk->sk_node); nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping; -#ifdef CONFIG_XPS +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping; #endif nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 2dda856ca260..4c7aca884fa9 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -696,7 +696,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify, if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); - dev_put(dev); + dev_put_track(dev, &v->dev_tracker); return 0; } @@ -896,6 +896,7 @@ static int vif_add(struct net *net, struct mr_table *mrt, /* And finish update writing critical data */ write_lock_bh(&mrt_lock); v->dev = dev; + netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC); if (v->flags & VIFF_REGISTER) mrt->mroute_reg_vif_num = vifi; if (vifi+1 > mrt->maxvif) diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 43b7a77cd6b4..f30273afb539 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -297,7 +297,6 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS), SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS), SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE), - SNMP_MIB_ITEM("TCPSmallQueueFailure", LINUX_MIB_TCPSMALLQUEUEFAILURE), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 243a0c52be42..843a7a3699fe 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1531,8 +1531,9 @@ void rt_flush_dev(struct net_device *dev) if (rt->dst.dev != dev) continue; rt->dst.dev = blackhole_netdev; - dev_hold(rt->dst.dev); - dev_put(dev); + dev_replace_track(dev, blackhole_netdev, + &rt->dst.dev_tracker, + GFP_ATOMIC); } spin_unlock_bh(&ul->lock); } @@ -2819,7 +2820,7 @@ struct dst_entry 
*ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or new->output = dst_discard_out; new->dev = net->loopback_dev; - dev_hold(new->dev); + dev_hold_track(new->dev, &new->dev_tracker, GFP_ATOMIC); rt->rt_is_input = ort->rt_is_input; rt->rt_iif = ort->rt_iif; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 6ab82e1a1d41..20054618c87e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3207,7 +3207,7 @@ static void tcp_enable_tx_delay(void) * TCP_CORK can be set together with TCP_NODELAY and it is stronger than * TCP_NODELAY. */ -static void __tcp_sock_set_cork(struct sock *sk, bool on) +void __tcp_sock_set_cork(struct sock *sk, bool on) { struct tcp_sock *tp = tcp_sk(sk); @@ -3235,7 +3235,7 @@ EXPORT_SYMBOL(tcp_sock_set_cork); * However, when TCP_NODELAY is set we make an explicit push, which overrides * even TCP_CORK for currently queued segments. */ -static void __tcp_sock_set_nodelay(struct sock *sk, bool on) +void __tcp_sock_set_nodelay(struct sock *sk, bool on) { if (on) { tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index cf913a66df17..7c2d3ac2363a 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child, int ret = 0; int state = child->sk_state; - /* record NAPI ID of child */ - sk_mark_napi_id(child, skb); + /* record sk_napi_id and sk_rx_queue_mapping of child. */ + sk_mark_napi_id_set(child, skb); tcp_segs_in(tcp_sk(child), skb); if (!sock_owned_by_user(child)) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index c4ab6c8f0c77..5079832af5c1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2524,11 +2524,8 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, * test again the condition. */ smp_mb__after_atomic(); - if (refcount_read(&sk->sk_wmem_alloc) > limit) { - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPSMALLQUEUEFAILURE); + if (refcount_read(&sk->sk_wmem_alloc) > limit) return true; - } } return false; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 799b663daf87..69d30053fed9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, kfree_skb(skb); return -EINVAL; } - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { + if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { kfree_skb(skb); return -EINVAL; } diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 9ebd54752e03..9e83bcb6bc99 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -77,7 +77,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, xdst->u.rt.rt_iif = fl4->flowi4_iif; xdst->u.dst.dev = dev; - dev_hold(dev); + dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC); /* Sheit... I remember I did this right. 
Apparently, * it was magically lost, so this code needs audit */ diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3445f8017430..3eee17790a82 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -405,13 +405,13 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) if (ndev->cnf.forwarding) dev_disable_lro(dev); /* We refer to the device */ - dev_hold(dev); + dev_hold_track(dev, &ndev->dev_tracker, GFP_KERNEL); if (snmp6_alloc_dev(ndev) < 0) { netdev_dbg(dev, "%s: cannot allocate memory for statistics\n", __func__); neigh_parms_release(&nd_tbl, ndev->nd_parms); - dev_put(dev); + dev_put_track(dev, &ndev->dev_tracker); kfree(ndev); return ERR_PTR(err); } diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 1d4054bb345b..881d1477d24a 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -263,7 +263,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) #ifdef NET_REFCNT_DEBUG pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL"); #endif - dev_put(dev); + dev_put_track(dev, &idev->dev_tracker); if (!idev->dead) { pr_warn("Freeing alive inet6 device %p\n", idev); return; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 40f3e4f9f33a..dcedfe29d9d9 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule, } INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg) { struct fib6_result *res = arg->result; @@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule, return false; suppress_route: - if (!(arg->flags & FIB_LOOKUP_NOREF)) - ip6_rt_put(rt); + ip6_rt_put_flags(rt, flags); return true; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index d831d2439693..110839a88bc2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -403,7 +403,7 @@ static void ip6erspan_tunnel_uninit(struct net_device *dev) ip6erspan_tunnel_unlink_md(ign, t); ip6gre_tunnel_unlink(ign, t); dst_cache_reset(&t->dst_cache); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } static void ip6gre_tunnel_uninit(struct net_device *dev) @@ -416,7 +416,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) if (ign->fb_tunnel_dev == dev) WRITE_ONCE(ign->fb_tunnel_dev, NULL); dst_cache_reset(&t->dst_cache); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } @@ -1496,7 +1496,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) } ip6gre_tnl_init_features(dev); - dev_hold(dev); + dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL); return 0; cleanup_dst_cache_init: @@ -1888,7 +1888,7 @@ static int ip6erspan_tap_init(struct net_device *dev) dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ip6erspan_tnl_link_config(tunnel, 1); - dev_hold(dev); + dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL); return 0; cleanup_dst_cache_init: diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 48674888f2dc..b29e9ba5e113 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -247,9 +247,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, * memcmp() alone below is sufficient, right? 
*/ if ((first_word & htonl(0xF00FFFFF)) || - !ipv6_addr_equal(&iph->saddr, &iph2->saddr) || - !ipv6_addr_equal(&iph->daddr, &iph2->daddr) || - *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) { + !ipv6_addr_equal(&iph->saddr, &iph2->saddr) || + !ipv6_addr_equal(&iph->daddr, &iph2->daddr) || + *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) { not_same_flow: NAPI_GRO_CB(p)->same_flow = 0; continue; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 484aca492cc0..fe786df4f849 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -383,7 +383,7 @@ ip6_tnl_dev_uninit(struct net_device *dev) else ip6_tnl_unlink(ip6n, t); dst_cache_reset(&t->dst_cache); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } /** @@ -1883,7 +1883,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev) dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len; - dev_hold(dev); + dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL); return 0; destroy_dst: diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 527e9ead7449..ed9b6d6ca65e 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -293,7 +293,7 @@ static void vti6_dev_uninit(struct net_device *dev) RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL); else vti6_tnl_unlink(ip6n, t); - dev_put(dev); + dev_put_track(dev, &t->dev_tracker); } static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi, @@ -934,7 +934,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev) dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; - dev_hold(dev); + dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL); return 0; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 36ed9efb8825..a77a15a7f3dc 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -746,7 +746,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify, if ((v->flags & MIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); - dev_put(dev); + dev_put_track(dev, &v->dev_tracker); return 0; } @@ -919,6 +919,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt, /* And finish update writing critical data */ write_lock_bh(&mrt_lock); v->dev = dev; + netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC); #ifdef CONFIG_IPV6_PIMSM_V2 if (v->flags & MIFF_REGISTER) mrt->mroute_reg_vif_num = vifi; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f0d29fcb2094..03be0e6b4826 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -182,8 +182,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) if (rt_dev == dev) { rt->dst.dev = blackhole_netdev; - dev_hold(rt->dst.dev); - dev_put(rt_dev); + dev_replace_track(rt_dev, blackhole_netdev, + &rt->dst.dev_tracker, + GFP_ATOMIC); } } spin_unlock_bh(&ul->lock); @@ -592,6 +593,7 @@ struct __rt6_probe_work { struct work_struct work; struct in6_addr target; struct net_device *dev; + netdevice_tracker dev_tracker; }; static void rt6_probe_deferred(struct work_struct *w) @@ -602,7 +604,7 @@ static void rt6_probe_deferred(struct work_struct *w) addrconf_addr_solict_mult(&work->target, &mcaddr); ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); - dev_put(work->dev); + dev_put_track(work->dev, &work->dev_tracker); kfree(work); } @@ -656,7 +658,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh) } else { INIT_WORK(&work->work, rt6_probe_deferred); work->target = *nh_gw; - dev_hold(dev); + dev_hold_track(dev, &work->dev_tracker, GFP_ATOMIC); work->dev = dev; schedule_work(&work->work); } @@ -3626,6 +3628,8 @@ 
pcpu_alloc: } fib6_nh->fib_nh_dev = dev; + netdev_tracker_alloc(dev, &fib6_nh->fib_nh_dev_tracker, gfp_flags); + fib6_nh->fib_nh_oif = dev->ifindex; err = 0; out: diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 3adc5d9211ad..d64855010948 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + + /* the control block has been erased, so we have to set the + * iif once again. + * We read the receiving interface index directly from the + * skb->skb_iif as it is done in the IPv4 receiving path (i.e.: + * ip_rcv_core(...)). + */ + IP6CB(skb)->iif = skb->skb_iif; } hdr->nexthdr = NEXTHDR_ROUTING; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1b57ee36d668..057c0f83c800 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -521,7 +521,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev) ipip6_tunnel_del_prl(tunnel, NULL); } dst_cache_reset(&tunnel->dst_cache); - dev_put(dev); + dev_put_track(dev, &tunnel->dev_tracker); } static int ipip6_err(struct sk_buff *skb, u32 info) @@ -1463,7 +1463,7 @@ static int ipip6_tunnel_init(struct net_device *dev) dev->tstats = NULL; return err; } - dev_hold(dev); + dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL); return 0; } diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index af7a4b8b1e9c..fad687ee6dd8 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -74,11 +74,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, struct rt6_info *rt = (struct rt6_info *)xdst->route; xdst->u.dst.dev = dev; - dev_hold(dev); + dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC); xdst->u.rt6.rt6i_idev = in6_dev_get(dev); if (!xdst->u.rt6.rt6i_idev) { - dev_put(dev); + dev_put_track(dev, &xdst->u.dst.dev_tracker); return -ENODEV; } diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index acf6e1343b88..9d1aafe75f92 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -32,7 +32,8 @@ static struct dentry *rootdir; struct l2tp_dfs_seq_data { - struct net *net; + struct net *net; + netns_tracker ns_tracker; int tunnel_idx; /* current tunnel */ int session_idx; /* index of session within current tunnel */ struct l2tp_tunnel *tunnel; @@ -281,7 +282,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) rc = PTR_ERR(pd->net); goto err_free_pd; } - + netns_tracker_alloc(pd->net, &pd->ns_tracker, GFP_KERNEL); rc = seq_open(file, &l2tp_dfs_seq_ops); if (rc) goto err_free_net; @@ -293,7 +294,7 @@ out: return rc; err_free_net: - put_net(pd->net); + put_net_track(pd->net, &pd->ns_tracker); err_free_pd: kfree(pd); goto out; @@ -307,7 +308,7 @@ static int l2tp_dfs_seq_release(struct inode *inode, struct file *file) seq = file->private_data; pd = seq->private; if (pd->net) - put_net(pd->net); + put_net_track(pd->net, &pd->ns_tracker); kfree(pd); seq_release(inode, file); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 3086f4a6ae68..26c00ebf4fba 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -224,7 +224,7 @@ static int llc_ui_release(struct socket *sock) } else { release_sock(sk); } - dev_put(llc->dev); + dev_put_track(llc->dev, &llc->dev_tracker); sock_put(sk); llc_sk_free(sk); out: @@ -295,6 +295,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) llc->dev = dev_getfirstbyhwtype(&init_net, 
addr->sllc_arphrd); if (!llc->dev) goto out; + netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL); rc = -EUSERS; llc->laddr.lsap = llc_ui_autoport(); if (!llc->laddr.lsap) @@ -362,7 +363,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) } else llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd, addr->sllc_mac); - dev_hold(llc->dev); + dev_hold_track(llc->dev, &llc->dev_tracker, GFP_ATOMIC); rcu_read_unlock(); if (!llc->dev) goto out; diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index 0ff490a73fae..07e9abb5978a 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -195,7 +195,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v) timer_pending(&llc->pf_cycle_timer.timer), timer_pending(&llc->rej_sent_timer.timer), timer_pending(&llc->busy_state_timer.timer), - !!sk->sk_backlog.tail, !!sk->sk_lock.owned); + !!sk->sk_backlog.tail, sock_owned_by_user_nocheck(sk)); out: return 0; } diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index 871cf6266125..c921de63b494 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -405,8 +405,7 @@ static void mctp_sk_unhash(struct sock *sk) trace_mctp_key_release(key, MCTP_TRACE_KEY_CLOSED); spin_lock(&key->lock); - if (key->reasm_head) - kfree_skb(key->reasm_head); + kfree_skb(key->reasm_head); key->reasm_head = NULL; key->reasm_dead = true; key->valid = false; diff --git a/net/mctp/route.c b/net/mctp/route.c index 46c44823edb7..8d9f4ff3e285 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -231,9 +231,7 @@ static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net, /* and one for the local reference */ mctp_key_unref(key); - if (skb) - kfree_skb(skb); - + kfree_skb(skb); } #ifdef CONFIG_MCTP_FLOWS @@ -892,8 +890,7 @@ out_release: if (!ext_rt) mctp_route_release(rt); - if (dev) - dev_put(dev); + dev_put(dev); return rc; @@ -952,7 +949,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start, } static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start, - unsigned int daddr_extent) + unsigned int daddr_extent, unsigned char type) { struct net *net = dev_net(mdev->dev); struct mctp_route *rt, *tmp; @@ -969,7 +966,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start, list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) { if (rt->dev == mdev && - rt->min == daddr_start && rt->max == daddr_end) { + rt->min == daddr_start && rt->max == daddr_end && + rt->type == type) { list_del_rcu(&rt->list); /* TODO: immediate RTM_DELROUTE */ mctp_route_release(rt); @@ -987,7 +985,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr) int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr) { - return mctp_route_remove(mdev, addr, 0); + return mctp_route_remove(mdev, addr, 0, RTN_LOCAL); } /* removes all entries for a given device */ @@ -1195,7 +1193,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, if (rtm->rtm_type != RTN_UNICAST) return -EINVAL; - rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len); + rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST); return rc; } diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c index cc6b8803aa9d..7b7918702592 100644 --- a/net/mctp/test/utils.c +++ b/net/mctp/test/utils.c @@ -12,7 +12,7 @@ static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb, struct net_device *ndev) { - kfree(skb); + kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/net/mpls/af_mpls.c 
b/net/mpls/af_mpls.c index 9fab40aa5c84..48f75a56f4ae 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, goto err; /* Find the output device */ - out_dev = rcu_dereference(nh->nh_dev); + out_dev = nh->nh_dev; if (!mpls_output_possible(out_dev)) goto tx_err; @@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt, (dev->addr_len != nh->nh_via_alen)) goto errout; - RCU_INIT_POINTER(nh->nh_dev, dev); + nh->nh_dev = dev; if (!(dev->flags & IFF_UP)) { nh->nh_flags |= RTNH_F_DEAD; @@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head) kfree(mdev); } -static void mpls_ifdown(struct net_device *dev, int event) +static int mpls_ifdown(struct net_device *dev, int event) { struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); - u8 alive, deleted; unsigned index; platform_label = rtnl_dereference(net->mpls.platform_label); for (index = 0; index < net->mpls.platform_labels; index++) { struct mpls_route *rt = rtnl_dereference(platform_label[index]); + bool nh_del = false; + u8 alive = 0; if (!rt) continue; - alive = 0; - deleted = 0; + if (event == NETDEV_UNREGISTER) { + u8 deleted = 0; + + for_nexthops(rt) { + if (!nh->nh_dev || nh->nh_dev == dev) + deleted++; + if (nh->nh_dev == dev) + nh_del = true; + } endfor_nexthops(rt); + + /* if there are no more nexthops, delete the route */ + if (deleted == rt->rt_nhn) { + mpls_route_update(net, index, NULL, NULL); + continue; + } + + if (nh_del) { + size_t size = sizeof(*rt) + rt->rt_nhn * + rt->rt_nh_size; + struct mpls_route *orig = rt; + + rt = kmalloc(size, GFP_KERNEL); + if (!rt) + return -ENOMEM; + memcpy(rt, orig, size); + } + } + change_nexthops(rt) { unsigned int nh_flags = nh->nh_flags; - if (rtnl_dereference(nh->nh_dev) != dev) + if (nh->nh_dev != dev) goto next; switch (event) { @@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event) break; } if (event == NETDEV_UNREGISTER) - RCU_INIT_POINTER(nh->nh_dev, NULL); + nh->nh_dev = NULL; if (nh->nh_flags != nh_flags) WRITE_ONCE(nh->nh_flags, nh_flags); next: if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))) alive++; - if (!rtnl_dereference(nh->nh_dev)) - deleted++; } endfor_nexthops(rt); WRITE_ONCE(rt->rt_nhn_alive, alive); - /* if there are no more nexthops, delete the route */ - if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn) - mpls_route_update(net, index, NULL, NULL); + if (nh_del) + mpls_route_update(net, index, rt, NULL); } + + return 0; } static void mpls_ifup(struct net_device *dev, unsigned int flags) @@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags) alive = 0; change_nexthops(rt) { unsigned int nh_flags = nh->nh_flags; - struct net_device *nh_dev = - rtnl_dereference(nh->nh_dev); if (!(nh_flags & flags)) { alive++; continue; } - if (nh_dev != dev) + if (nh->nh_dev != dev) continue; alive++; nh_flags &= ~flags; @@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, return NOTIFY_OK; switch (event) { + int err; + case NETDEV_DOWN: - mpls_ifdown(dev, event); + err = mpls_ifdown(dev, event); + if (err) + return notifier_from_errno(err); break; case NETDEV_UP: flags = dev_get_flags(dev); @@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, break; case NETDEV_CHANGE: flags = dev_get_flags(dev); - if (flags & (IFF_RUNNING | IFF_LOWER_UP)) + if (flags & 
(IFF_RUNNING | IFF_LOWER_UP)) { mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN); - else - mpls_ifdown(dev, event); + } else { + err = mpls_ifdown(dev, event); + if (err) + return notifier_from_errno(err); + } break; case NETDEV_UNREGISTER: - mpls_ifdown(dev, event); + err = mpls_ifdown(dev, event); + if (err) + return notifier_from_errno(err); mdev = mpls_dev_get(dev); if (mdev) { mpls_dev_sysctl_unregister(dev, mdev); @@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, case NETDEV_CHANGENAME: mdev = mpls_dev_get(dev); if (mdev) { - int err; - mpls_dev_sysctl_unregister(dev, mdev); err = mpls_dev_sysctl_register(dev, mdev); if (err) @@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), nh->nh_via_alen)) goto nla_put_failure; - dev = rtnl_dereference(nh->nh_dev); + dev = nh->nh_dev; if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) goto nla_put_failure; if (nh->nh_flags & RTNH_F_LINKDOWN) @@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, goto nla_put_failure; for_nexthops(rt) { - dev = rtnl_dereference(nh->nh_dev); + dev = nh->nh_dev; if (!dev) continue; @@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, static bool mpls_rt_uses_dev(struct mpls_route *rt, const struct net_device *dev) { - struct net_device *nh_dev; - if (rt->rt_nhn == 1) { struct mpls_nh *nh = rt->rt_nh; - nh_dev = rtnl_dereference(nh->nh_dev); - if (dev == nh_dev) + if (nh->nh_dev == dev) return true; } else { for_nexthops(rt) { - nh_dev = rtnl_dereference(nh->nh_dev); - if (nh_dev == dev) + if (nh->nh_dev == dev) return true; } endfor_nexthops(rt); } @@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) size_t nhsize = 0; for_nexthops(rt) { - if (!rtnl_dereference(nh->nh_dev)) + if (!nh->nh_dev) continue; nhsize += nla_total_size(sizeof(struct rtnexthop)); /* RTA_VIA */ @@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), nh->nh_via_alen)) goto nla_put_failure; - dev = rtnl_dereference(nh->nh_dev); + dev = nh->nh_dev; if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) goto nla_put_failure; @@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) rt0 = mpls_rt_alloc(1, lo->addr_len, 0); if (IS_ERR(rt0)) goto nort0; - RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo); + rt0->rt_nh->nh_dev = lo; rt0->rt_protocol = RTPROT_KERNEL; rt0->rt_payload_type = MPT_IPV4; rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; @@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) rt2 = mpls_rt_alloc(1, lo->addr_len, 0); if (IS_ERR(rt2)) goto nort2; - RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo); + rt2->rt_nh->nh_dev = lo; rt2->rt_protocol = RTPROT_KERNEL; rt2->rt_payload_type = MPT_IPV6; rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; diff --git a/net/mpls/internal.h b/net/mpls/internal.h index 56c5cad3a230..b9f492ddf93b 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -87,7 +87,7 @@ enum mpls_payload_type { }; struct mpls_nh { /* next hop label forwarding entry */ - struct net_device __rcu *nh_dev; + struct net_device *nh_dev; /* nh_flags is accessed under RCU in the packet path; it is * modified handling netdev events with rtnl lock held diff --git a/net/mptcp/pm_netlink.c 
b/net/mptcp/pm_netlink.c index 7b96be1e9f14..4ff8d55cbe82 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1702,22 +1702,28 @@ next: static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info) { + struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }, *entry; struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - struct mptcp_pm_addr_entry addr, *entry; struct net *net = sock_net(skb->sk); - u8 bkup = 0; + u8 bkup = 0, lookup_by_id = 0; int ret; - ret = mptcp_pm_parse_addr(attr, info, true, &addr); + ret = mptcp_pm_parse_addr(attr, info, false, &addr); if (ret < 0) return ret; if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP) bkup = 1; + if (addr.addr.family == AF_UNSPEC) { + lookup_by_id = 1; + if (!addr.addr.id) + return -EOPNOTSUPP; + } list_for_each_entry(entry, &pernet->local_addr_list, list) { - if (addresses_equal(&entry->addr, &addr.addr, true)) { + if ((!lookup_by_id && addresses_equal(&entry->addr, &addr.addr, true)) || + (lookup_by_id && entry->addr.id == addr.addr.id)) { mptcp_nl_addr_backup(net, &entry->addr, bkup); if (bkup) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index b100048e43fe..f124cca125d2 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -22,6 +22,7 @@ #endif #include <net/mptcp.h> #include <net/xfrm.h> +#include <asm/ioctls.h> #include "protocol.h" #include "mib.h" @@ -46,6 +47,7 @@ struct mptcp_skb_cb { enum { MPTCP_CMSG_TS = BIT(0), + MPTCP_CMSG_INQ = BIT(1), }; static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp; @@ -738,6 +740,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, delta); MPTCP_SKB_CB(skb)->offset += delta; + MPTCP_SKB_CB(skb)->map_seq += delta; __skb_queue_tail(&sk->sk_receive_queue, skb); } msk->ack_seq = end_seq; @@ -1499,7 +1502,7 @@ static void mptcp_update_post_push(struct mptcp_sock *msk, msk->snd_nxt = snd_nxt_new; } -static void mptcp_check_and_set_pending(struct sock *sk) +void mptcp_check_and_set_pending(struct sock *sk) { if (mptcp_send_head(sk) && !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) @@ -1784,8 +1787,10 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, copied += count; if (count < data_len) { - if (!(flags & MSG_PEEK)) + if (!(flags & MSG_PEEK)) { MPTCP_SKB_CB(skb)->offset += count; + MPTCP_SKB_CB(skb)->map_seq += count; + } break; } @@ -1965,6 +1970,27 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk) return !skb_queue_empty(&msk->receive_queue); } +static unsigned int mptcp_inq_hint(const struct sock *sk) +{ + const struct mptcp_sock *msk = mptcp_sk(sk); + const struct sk_buff *skb; + + skb = skb_peek(&msk->receive_queue); + if (skb) { + u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; + + if (hint_val >= INT_MAX) + return INT_MAX; + + return (unsigned int)hint_val; + } + + if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) + return 1; + + return 0; +} + static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { @@ -1989,6 +2015,9 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, len = min_t(size_t, len, INT_MAX); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); + if (unlikely(msk->recvmsg_inq)) + cmsg_flags = MPTCP_CMSG_INQ; + while (copied < len) { int bytes_read; @@ -2062,6 +2091,12 @@ out_err: if (cmsg_flags && copied >= 0) { if (cmsg_flags & MPTCP_CMSG_TS) 
tcp_recv_timestamp(msg, sk, &tss); + + if (cmsg_flags & MPTCP_CMSG_INQ) { + unsigned int inq = mptcp_inq_hint(sk); + + put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); + } } pr_debug("msk=%p rx queue empty=%d:%d copied=%d", @@ -3177,6 +3212,57 @@ static int mptcp_forward_alloc_get(const struct sock *sk) return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc; } +static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) +{ + const struct sock *sk = (void *)msk; + u64 delta; + + if (sk->sk_state == TCP_LISTEN) + return -EINVAL; + + if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) + return 0; + + delta = msk->write_seq - v; + if (delta > INT_MAX) + delta = INT_MAX; + + return (int)delta; +} + +static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + bool slow; + int answ; + + switch (cmd) { + case SIOCINQ: + if (sk->sk_state == TCP_LISTEN) + return -EINVAL; + + lock_sock(sk); + __mptcp_move_skbs(msk); + answ = mptcp_inq_hint(sk); + release_sock(sk); + break; + case SIOCOUTQ: + slow = lock_sock_fast(sk); + answ = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); + unlock_sock_fast(sk, slow); + break; + case SIOCOUTQNSD: + slow = lock_sock_fast(sk); + answ = mptcp_ioctl_outq(msk, msk->snd_nxt); + unlock_sock_fast(sk, slow); + break; + default: + return -ENOIOCTLCMD; + } + + return put_user(answ, (int __user *)arg); +} + static struct proto mptcp_prot = { .name = "MPTCP", .owner = THIS_MODULE, @@ -3189,6 +3275,7 @@ static struct proto mptcp_prot = { .shutdown = mptcp_shutdown, .destroy = mptcp_destroy, .sendmsg = mptcp_sendmsg, + .ioctl = mptcp_ioctl, .recvmsg = mptcp_recvmsg, .release_cb = mptcp_release_cb, .hash = mptcp_hash, diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index d87cc040352e..e1469155fb15 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -249,6 +249,9 @@ struct mptcp_sock { bool rcv_fastclose; bool use_64bit_ack; /* Set when we received a 64-bit DSN */ bool csum_enabled; + u8 recvmsg_inq:1, + cork:1, + nodelay:1; spinlock_t join_list_lock; struct work_struct work; struct sk_buff *ooo_last_skb; @@ -554,6 +557,7 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net); void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, struct mptcp_options_received *mp_opt); bool __mptcp_retransmit_pending_data(struct sock *sk); +void mptcp_check_and_set_pending(struct sock *sk); void __mptcp_push_pending(struct sock *sk, unsigned int flags); bool mptcp_subflow_data_available(struct sock *sk); void __init mptcp_subflow_init(void); diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index fb43e145cb57..3c3db22fd36a 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -557,6 +557,7 @@ static bool mptcp_supported_sockopt(int level, int optname) case TCP_TIMESTAMP: case TCP_NOTSENT_LOWAT: case TCP_TX_DELAY: + case TCP_INQ: return true; } @@ -568,7 +569,6 @@ static bool mptcp_supported_sockopt(int level, int optname) /* TCP_FASTOPEN_KEY, TCP_FASTOPEN TCP_FASTOPEN_CONNECT, TCP_FASTOPEN_NO_COOKIE, * are not supported fastopen is currently unsupported */ - /* TCP_INQ is currently unsupported, needs some recvmsg work */ } return false; } @@ -616,6 +616,66 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t return ret; } +static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval, + unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + int val; + + if 
(optlen < sizeof(int)) + return -EINVAL; + + if (copy_from_sockptr(&val, optval, sizeof(val))) + return -EFAULT; + + lock_sock(sk); + sockopt_seq_inc(msk); + msk->cork = !!val; + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + lock_sock(ssk); + __tcp_sock_set_cork(ssk, !!val); + release_sock(ssk); + } + if (!val) + mptcp_check_and_set_pending(sk); + release_sock(sk); + + return 0; +} + +static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval, + unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + int val; + + if (optlen < sizeof(int)) + return -EINVAL; + + if (copy_from_sockptr(&val, optval, sizeof(val))) + return -EFAULT; + + lock_sock(sk); + sockopt_seq_inc(msk); + msk->nodelay = !!val; + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + lock_sock(ssk); + __tcp_sock_set_nodelay(ssk, !!val); + release_sock(ssk); + } + if (val) + mptcp_check_and_set_pending(sk); + release_sock(sk); + + return 0; +} + static int mptcp_setsockopt_sol_ip_set_transparent(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { @@ -698,11 +758,29 @@ static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname, static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { + struct sock *sk = (void *)msk; + int ret, val; + switch (optname) { + case TCP_INQ: + ret = mptcp_get_int_option(msk, optval, optlen, &val); + if (ret) + return ret; + if (val < 0 || val > 1) + return -EINVAL; + + lock_sock(sk); + msk->recvmsg_inq = !!val; + release_sock(sk); + return 0; case TCP_ULP: return -EOPNOTSUPP; case TCP_CONGESTION: return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen); + case TCP_CORK: + return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen); + case TCP_NODELAY: + return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen); } return -EOPNOTSUPP; @@ -1032,6 +1110,35 @@ static int mptcp_getsockopt_subflow_addrs(struct mptcp_sock *msk, char __user *o return 0; } +static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval, + int __user *optlen, int val) +{ + int len; + + if (get_user(len, optlen)) + return -EFAULT; + if (len < 0) + return -EINVAL; + + if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { + unsigned char ucval = (unsigned char)val; + + len = 1; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &ucval, 1)) + return -EFAULT; + } else { + len = min_t(unsigned int, len, sizeof(int)); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + } + + return 0; +} + static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { @@ -1042,10 +1149,29 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname, case TCP_CC_INFO: return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen); + case TCP_INQ: + return mptcp_put_int_option(msk, optval, optlen, msk->recvmsg_inq); + case TCP_CORK: + return mptcp_put_int_option(msk, optval, optlen, msk->cork); + case TCP_NODELAY: + return mptcp_put_int_option(msk, optval, optlen, msk->nodelay); } return -EOPNOTSUPP; } +static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = (void *)msk; + + switch (optname) { + case IP_TOS: + return 
mptcp_put_int_option(msk, optval, optlen, inet_sk(sk)->tos); + } + + return -EOPNOTSUPP; +} + static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { @@ -1081,6 +1207,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname, if (ssk) return tcp_getsockopt(ssk, level, optname, optval, option); + if (level == SOL_IP) + return mptcp_getsockopt_v4(msk, optname, optval, option); if (level == SOL_TCP) return mptcp_getsockopt_sol_tcp(msk, optname, optval, option); if (level == SOL_MPTCP) @@ -1129,6 +1257,8 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk) if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops) tcp_set_congestion_control(ssk, msk->ca_name, false, true); + __tcp_sock_set_cork(ssk, !!msk->cork); + __tcp_sock_set_nodelay(ssk, !!msk->nodelay); inet_sk(ssk)->transparent = inet_sk(sk)->transparent; inet_sk(ssk)->freebind = inet_sk(sk)->freebind; diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index b8dd3441f7d0..24bc9d5e87be 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1534,7 +1534,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) * needs it. */ sf->sk->sk_net_refcnt = 1; - get_net(net); + get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); sock_inuse_add(net, 1); err = tcp_set_ulp(sf->sk, "mptcp"); release_sock(sf->sk); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index b622ef143415..d7e313548066 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) tstamp = nf_conn_tstamp_find(ct); if (tstamp) { - s32 timeout = ct->timeout - nfct_time_stamp; + s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp; tstamp->stop = ktime_get_real_ns(); if (timeout < 0) @@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx) } /* We want the clashing entry to go away real soon: 1 second timeout. */ - loser_ct->timeout = nfct_time_stamp + HZ; + WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ); /* IPS_NAT_CLASH removes the entry automatically on the first * reply. 
Also prevents UDP tracker from moving the entry to @@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net, /* save hash for reusing when confirming */ *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; ct->status = 0; - ct->timeout = 0; + WRITE_ONCE(ct->timeout, 0); write_pnet(&ct->ct_net, net); memset_after(ct, 0, __nfct_init_offset); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index f9f5cb46c43d..4f53d9480ed5 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1998,7 +1998,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct, if (timeout > INT_MAX) timeout = INT_MAX; - ct->timeout = nfct_time_stamp + (u32)timeout; + WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout); if (test_bit(IPS_DYING_BIT, &ct->status)) return -ETIME; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 87a7388b6c89..ed37bb9b4e58 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) if (timeout < 0) timeout = 0; - if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout) - ct->timeout = nfct_time_stamp + timeout; + if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout) + WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout); } static void flow_offload_fixup_ct_state(struct nf_conn *ct) diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index b61165e97252..08771f47d469 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, struct net_device *indev; struct net_device *outdev; struct nf_conn *ct = NULL; - enum ip_conntrack_info ctinfo; + enum ip_conntrack_info ctinfo = 0; struct nfnl_ct_hook *nfnl_ct; bool csum_verify; char *secdata = NULL; diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index af4ee874a067..dbe1f2e7dd9e 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) - return; + goto err; opt = (u8 *)tcph; for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { @@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, continue; if (i + optl > tcphdr_len || priv->len + priv->offset > optl) - return; + goto err; if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + i + priv->len)) - return; + goto err; tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) - return; + goto err; offset = i + priv->offset; @@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, return; } + return; +err: + regs->verdict.code = NFT_BREAK; } static void nft_exthdr_sctp_eval(const struct nft_expr *expr, diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index e517663e0cd1..6f4116e72958 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill, NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize); NFT_PIPAPO_AVX2_AND(5, 0, 1); - NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 6, pkt[5], bsize); + NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 5, pkt[5], bsize); 
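The one-character nft_set_pipapo_avx2 change just above is a lookup-table indexing fix: the 8-bit bucket load for packet byte 5 was reading group 6, and now reads group 5 to match pkt[5]. Separately, the READ_ONCE()/WRITE_ONCE() conversions on ct->timeout in the conntrack hunks annotate a field that is updated and sampled without a shared lock; the markers keep the compiler from tearing the 32-bit store or refetching the load mid-computation. A minimal sketch of the access pattern (the helper names are illustrative, not from the patch):

#include <net/netfilter/nf_conntrack.h>

static bool ct_expired(const struct nf_conn *ct)
{
        /* one racy-but-atomic sample of the deadline */
        s32 remaining = READ_ONCE(ct->timeout) - nfct_time_stamp;

        return remaining <= 0;
}

static void ct_extend(struct nf_conn *ct, u32 extra_jiffies)
{
        WRITE_ONCE(ct->timeout, nfct_time_stamp + extra_jiffies);
}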
NFT_PIPAPO_AVX2_AND(7, 2, 3); /* Stall */ diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1a19d179e913..4be2d97ff93e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1848,6 +1848,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; + if (len == 0) { + pr_warn_once("Zero length message leads to an empty skb\n"); + return -ENODATA; + } + err = scm_send(sock, msg, &scm, true); if (err < 0) return err; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 334f63c9529e..f184b0db79d4 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; - nfc_device_iter_exit(iter); - kfree(iter); + if (iter) { + nfc_device_iter_exit(iter); + kfree(iter); + } return 0; } @@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; - nfc_device_iter_exit(iter); - kfree(iter); + if (iter) { + nfc_device_iter_exit(iter); + kfree(iter); + } return 0; } diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 8e1a88f13622..b498dac4e1e0 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -82,7 +82,7 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name) err = -ENODEV; goto error_free_vport; } - + netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL); if (vport->dev->flags & IFF_LOOPBACK || (vport->dev->type != ARPHRD_ETHER && vport->dev->type != ARPHRD_NONE) || @@ -115,7 +115,7 @@ error_master_upper_dev_unlink: error_unlock: rtnl_unlock(); error_put: - dev_put(vport->dev); + dev_put_track(vport->dev, &vport->dev_tracker); error_free_vport: ovs_vport_free(vport); return ERR_PTR(err); @@ -137,8 +137,7 @@ static void vport_netdev_free(struct rcu_head *rcu) { struct vport *vport = container_of(rcu, struct vport, rcu); - if (vport->dev) - dev_put(vport->dev); + dev_put_track(vport->dev, &vport->dev_tracker); ovs_vport_free(vport); } @@ -174,7 +173,7 @@ void ovs_netdev_tunnel_destroy(struct vport *vport) */ if (vport->dev->reg_state == NETREG_REGISTERED) rtnl_delete_link(vport->dev); - dev_put(vport->dev); + dev_put_track(vport->dev, &vport->dev_tracker); vport->dev = NULL; rtnl_unlock(); diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 8a930ca6d6b1..9de5030d9801 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -58,6 +58,7 @@ struct vport_portids { /** * struct vport - one port within a datapath * @dev: Pointer to net_device. + * @dev_tracker: refcount tracker for @dev reference * @dp: Datapath to which this port belongs. * @upcall_portids: RCU protected 'struct vport_portids'. * @port_no: Index into @dp's @ports array. 
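The netlink_sendmsg() change above rejects zero-length messages with -ENODATA (plus a one-time warning) instead of queueing an empty skb that confuses downstream parsers. A small userspace sketch of the now-visible behavior, assuming a plain NETLINK_ROUTE socket:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        ssize_t n;

        if (fd < 0)
                return 1;
        n = send(fd, "", 0, 0);         /* zero-length message */
        if (n < 0 && errno == ENODATA)
                printf("empty netlink message rejected, as expected\n");
        return 0;
}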
@@ -69,6 +70,7 @@ struct vport_portids { */ struct vport { struct net_device *dev; + netdevice_tracker dev_tracker; struct datapath *dp; struct vport_portids __rcu *upcall_portids; u16 port_no; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a1ffdb48cc47..71854a16afbb 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3109,7 +3109,7 @@ static int packet_release(struct socket *sock) packet_cached_dev_reset(po); if (po->prot_hook.dev) { - dev_put(po->prot_hook.dev); + dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); @@ -3217,18 +3217,25 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, WRITE_ONCE(po->num, proto); po->prot_hook.type = proto; + dev_put_track(dev_curr, &po->prot_hook.dev_tracker); + dev_curr = NULL; + if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; WRITE_ONCE(po->ifindex, -1); packet_cached_dev_reset(po); } else { + if (dev) + netdev_tracker_alloc(dev, + &po->prot_hook.dev_tracker, + GFP_ATOMIC); po->prot_hook.dev = dev; WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); packet_cached_dev_assign(po, dev); } } - dev_put(dev_curr); + dev_put_track(dev_curr, &po->prot_hook.dev_tracker); if (proto == 0 || !need_rehook) goto out_unlock; @@ -4138,7 +4145,8 @@ static int packet_notifier(struct notifier_block *this, if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); WRITE_ONCE(po->ifindex, -1); - dev_put(po->prot_hook.dev); + dev_put_track(po->prot_hook.dev, + &po->prot_hook.dev_tracker); po->prot_hook.dev = NULL; } spin_unlock(&po->bind_lock); diff --git a/net/rds/tcp.c b/net/rds/tcp.c index abf19c0e3ba0..5327d130c4b5 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock) sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } if (rtn->rcvbuf_size > 0) { - sk->sk_sndbuf = rtn->rcvbuf_size; + sk->sk_rcvbuf = rtn->rcvbuf_size; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } release_sock(sk); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index dbea0bfee48e..8120138dac01 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle) return bundle; } +static void rxrpc_free_bundle(struct rxrpc_bundle *bundle) +{ + rxrpc_put_peer(bundle->params.peer); + kfree(bundle); +} + void rxrpc_put_bundle(struct rxrpc_bundle *bundle) { unsigned int d = bundle->debug_id; unsigned int u = atomic_dec_return(&bundle->usage); _debug("PUT B=%x %u", d, u); - if (u == 0) { - rxrpc_put_peer(bundle->params.peer); - kfree(bundle); - } + if (u == 0) + rxrpc_free_bundle(bundle); } /* @@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c return candidate; found_bundle_free: - kfree(candidate); + rxrpc_free_bundle(candidate); found_bundle: rxrpc_get_bundle(bundle); spin_unlock(&local->client_bundles_lock); diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 68396d052052..0298fe2ad6d3 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx, return peer; } +static void rxrpc_free_peer(struct rxrpc_peer *peer) +{ + rxrpc_put_local(peer->local); + kfree_rcu(peer, rcu); +} + /* * Set up a new incoming peer. 
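Both rxrpc hunks above apply the same rule: an object whose constructor takes subsidiary references (the bundle's peer, the peer's local endpoint) needs one free helper used by every disposal path, including the "lost the race, discard the candidate" path that previously did a bare kfree() and leaked the held reference. A hedged sketch of the pattern with illustrative names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct dep;                             /* stands in for peer/local */
void put_dep(struct dep *d);            /* illustrative release */

struct obj {
        struct dep *dep;                /* reference taken at creation */
        refcount_t usage;
};

static void obj_free(struct obj *o)
{
        put_dep(o->dep);                /* the step raw kfree() paths missed */
        kfree(o);
}

static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->usage))
                obj_free(o);
}

/* discarding an unused candidate takes the same route */
static void obj_discard(struct obj *candidate)
{
        obj_free(candidate);
}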
There shouldn't be any other matching peers * since we've already done a search in the list from the non-reentrant context @@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, spin_unlock_bh(&rxnet->peer_hash_lock); if (peer) - kfree(candidate); + rxrpc_free_peer(candidate); else peer = candidate; } @@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer) list_del_init(&peer->keepalive_link); spin_unlock_bh(&rxnet->peer_hash_lock); - rxrpc_put_local(peer->local); - kfree_rcu(peer, rcu); + rxrpc_free_peer(peer); } /* @@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer) if (n == 0) { hash_del_rcu(&peer->hash_link); list_del_init(&peer->keepalive_link); - rxrpc_put_local(peer->local); - kfree_rcu(peer, rcu); + rxrpc_free_peer(peer); } } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 90866ae45573..ab1810f2e660 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -393,7 +393,8 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft, { bool tcp = false; - if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) + if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) || + !test_bit(IPS_ASSURED_BIT, &ct->status)) return; switch (nf_ct_protonum(ct)) { diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index efc963ab995a..952416bd65e6 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -79,7 +79,7 @@ static void tcf_mirred_release(struct tc_action *a) /* last reference to action, no need to lock */ dev = rcu_dereference_protected(m->tcfm_dev, 1); - dev_put(dev); + dev_put_track(dev, &m->tcfm_dev_tracker); } static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = { @@ -101,7 +101,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, bool mac_header_xmit = false; struct tc_mirred *parm; struct tcf_mirred *m; - struct net_device *dev; bool exists = false; int ret, err; u32 index; @@ -171,16 +170,19 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, spin_lock_bh(&m->tcf_lock); if (parm->ifindex) { - dev = dev_get_by_index(net, parm->ifindex); - if (!dev) { + struct net_device *odev, *ndev; + + ndev = dev_get_by_index(net, parm->ifindex); + if (!ndev) { spin_unlock_bh(&m->tcf_lock); err = -ENODEV; goto put_chain; } - mac_header_xmit = dev_is_mac_header_xmit(dev); - dev = rcu_replace_pointer(m->tcfm_dev, dev, + mac_header_xmit = dev_is_mac_header_xmit(ndev); + odev = rcu_replace_pointer(m->tcfm_dev, ndev, lockdep_is_held(&m->tcf_lock)); - dev_put(dev); + dev_put_track(odev, &m->tcfm_dev_tracker); + netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC); m->tcfm_mac_header_xmit = mac_header_xmit; } goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); @@ -400,7 +402,7 @@ static int mirred_device_event(struct notifier_block *unused, list_for_each_entry(m, &mirred_list, tcfm_list) { spin_lock_bh(&m->tcf_lock); if (tcf_mirred_dev_dereference(m) == dev) { - dev_put(dev); + dev_put_track(dev, &m->tcfm_dev_tracker); /* Note : no rcu grace period necessary, as * net_device are already rcu protected. 
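tcf_mirred_init() above shows the trickier tracker case: the device pointer is RCU-published and can be replaced while the action lives, so the old tracked reference is dropped and a fresh tracker is allocated for the incoming device. A minimal sketch of that swap, with mirred-like names standing in for the real fields:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct mirred_like {
        struct net_device __rcu *dev;
        netdevice_tracker dev_tracker;
        spinlock_t lock;
};

static void swap_tracked_dev(struct mirred_like *m, struct net_device *ndev)
{
        struct net_device *odev;

        odev = rcu_replace_pointer(m->dev, ndev,
                                   lockdep_is_held(&m->lock));
        dev_put_track(odev, &m->dev_tracker);   /* tolerates odev == NULL */
        netdev_tracker_alloc(ndev, &m->dev_tracker, GFP_ATOMIC);
}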
*/ diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 830f3559f727..d6aba6edd16e 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch) struct fq_pie_sched_data *q = qdisc_priv(sch); tcf_block_put(q->block); + q->p_params.tupdate = 0; del_timer_sync(&q->adapt_timer); kvfree(q->flows); } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index d33804d41c5c..b07bd1c7330f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -499,6 +499,7 @@ EXPORT_SYMBOL(netif_tx_unlock); static void dev_watchdog(struct timer_list *t) { struct net_device *dev = from_timer(dev, t, watchdog_timer); + bool release = true; spin_lock(&dev->tx_global_lock); if (!qdisc_tx_is_noop(dev)) { @@ -534,12 +535,13 @@ static void dev_watchdog(struct timer_list *t) if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) - dev_hold(dev); + release = false; } } spin_unlock(&dev->tx_global_lock); - dev_put(dev); + if (release) + dev_put_track(dev, &dev->watchdog_dev_tracker); } void __netdev_watchdog_up(struct net_device *dev) @@ -549,7 +551,7 @@ void __netdev_watchdog_up(struct net_device *dev) dev->watchdog_timeo = 5*HZ; if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) - dev_hold(dev); + dev_hold_track(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC); } } EXPORT_SYMBOL_GPL(__netdev_watchdog_up); @@ -563,7 +565,7 @@ static void dev_watchdog_down(struct net_device *dev) { netif_tx_lock_bh(dev); if (del_timer(&dev->watchdog_timer)) - dev_put(dev); + dev_put_track(dev, &dev->watchdog_dev_tracker); netif_tx_unlock_bh(dev); } @@ -973,7 +975,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; sch->dev_queue = dev_queue; - dev_hold(dev); + dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL); refcount_set(&sch->refcnt, 1); return sch; @@ -1073,7 +1075,7 @@ static void qdisc_destroy(struct Qdisc *qdisc) ops->destroy(qdisc); module_put(ops->owner); - dev_put(qdisc_dev(qdisc)); + dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker); trace_qdisc_destroy(qdisc); diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 3715d2f5ad55..292e4d904ab6 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc) int old_state; long timeout; int rc = 0; + int rc1 = 0; timeout = current->flags & PF_EXITING ? 0 : sock_flag(sk, SOCK_LINGER) ? @@ -232,8 +233,11 @@ again: /* actively shutdown clcsock before peer close it, * prevent peer from entering TIME_WAIT state. */ - if (smc->clcsock && smc->clcsock->sk) - rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); + if (smc->clcsock && smc->clcsock->sk) { + rc1 = kernel_sock_shutdown(smc->clcsock, + SHUT_RDWR); + rc = rc ? 
rc : rc1; + } } else { /* peer event has changed the state */ goto again; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index bb52c8b5f148..85be94cabb01 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb) void smc_lgr_cleanup_early(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; - struct list_head *lgr_list; spinlock_t *lgr_lock; if (!lgr) return; smc_conn_free(conn); - lgr_list = smc_lgr_list_head(lgr, &lgr_lock); + smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); /* do not use this link group for new connections */ - if (!list_empty(lgr_list)) - list_del_init(lgr_list); + if (!list_empty(&lgr->list)) + list_del_init(&lgr->list); spin_unlock_bh(lgr_lock); __smc_lgr_terminate(lgr, true); } @@ -1102,18 +1101,24 @@ static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc, smc_buf_free(lgr, true, rmb_desc); } else { rmb_desc->used = 0; + memset(rmb_desc->cpu_addr, 0, rmb_desc->len); } } static void smc_buf_unuse(struct smc_connection *conn, struct smc_link_group *lgr) { - if (conn->sndbuf_desc) + if (conn->sndbuf_desc) { conn->sndbuf_desc->used = 0; - if (conn->rmb_desc && lgr->is_smcd) + memset(conn->sndbuf_desc->cpu_addr, 0, conn->sndbuf_desc->len); + } + if (conn->rmb_desc && lgr->is_smcd) { conn->rmb_desc->used = 0; - else if (conn->rmb_desc) + memset(conn->rmb_desc->cpu_addr, 0, conn->rmb_desc->len + + sizeof(struct smcd_cdc_msg)); + } else if (conn->rmb_desc) { smcr_buf_unuse(conn->rmb_desc, lgr); + } } /* remove a finished connection from its link group */ @@ -2149,7 +2154,6 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) if (buf_desc) { SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize); SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb); - memset(buf_desc->cpu_addr, 0, bufsize); break; /* found reusable slot */ } diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 67e9d9fde085..e171cc6483f8 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -64,6 +64,7 @@ struct smc_pnetentry { struct { char eth_name[IFNAMSIZ + 1]; struct net_device *ndev; + netdevice_tracker dev_tracker; }; struct { char ib_name[IB_DEVICE_NAME_MAX + 1]; @@ -119,7 +120,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) smc_pnet_match(pnetelem->pnet_name, pnet_name)) { list_del(&pnetelem->list); if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev) { - dev_put(pnetelem->ndev); + dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker); pr_warn_ratelimited("smc: net device %s " "erased user defined " "pnetid %.16s\n", @@ -195,7 +196,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { - dev_hold(ndev); + dev_hold_track(ndev, &pnetelem->dev_tracker, GFP_ATOMIC); pnetelem->ndev = ndev; rc = 0; pr_warn_ratelimited("smc: adding net device %s with " @@ -226,7 +227,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) write_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { - dev_put(pnetelem->ndev); + dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker); pnetelem->ndev = NULL; rc = 0; pr_warn_ratelimited("smc: removing net device %s with " @@ -368,7 +369,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, 
struct net *net, memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strncpy(new_pe->eth_name, eth_name, IFNAMSIZ); new_pe->ndev = ndev; - + netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_KERNEL); rc = -EEXIST; new_netdev = true; write_lock(&pnettable->lock); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index ae48c9c84ee1..d8ee06a9650a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1720,15 +1720,15 @@ static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port) } #ifdef CONFIG_DEBUG_LOCK_ALLOC -static struct lock_class_key xs_key[2]; -static struct lock_class_key xs_slock_key[2]; +static struct lock_class_key xs_key[3]; +static struct lock_class_key xs_slock_key[3]; static inline void xs_reclassify_socketu(struct socket *sock) { struct sock *sk = sock->sk; sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", - &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); + &xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]); } static inline void xs_reclassify_socket4(struct socket *sock) @@ -1736,7 +1736,7 @@ static inline void xs_reclassify_socket4(struct socket *sock) struct sock *sk = sock->sk; sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", - &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); + &xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]); } static inline void xs_reclassify_socket6(struct socket *sock) @@ -1744,7 +1744,7 @@ static inline void xs_reclassify_socket6(struct socket *sock) struct sock *sk = sock->sk; sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", - &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); + &xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]); } static inline void xs_reclassify_socket(int family, struct socket *sock) diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 83460470e883..b62565278fac 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -28,6 +28,7 @@ typedef void switchdev_deferred_func_t(struct net_device *dev, struct switchdev_deferred_item { struct list_head list; struct net_device *dev; + netdevice_tracker dev_tracker; switchdev_deferred_func_t *func; unsigned long data[]; }; @@ -63,7 +64,7 @@ void switchdev_deferred_process(void) while ((dfitem = switchdev_deferred_dequeue())) { dfitem->func(dfitem->dev, dfitem->data); - dev_put(dfitem->dev); + dev_put_track(dfitem->dev, &dfitem->dev_tracker); kfree(dfitem); } } @@ -90,7 +91,7 @@ static int switchdev_deferred_enqueue(struct net_device *dev, dfitem->dev = dev; dfitem->func = func; memcpy(dfitem->data, data, data_len); - dev_hold(dev); + dev_hold_track(dev, &dfitem->dev_tracker, GFP_ATOMIC); spin_lock_bh(&deferred_lock); list_add_tail(&dfitem->list, &deferred); spin_unlock_bh(&deferred_lock); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 60bc74b76adc..473a790f5894 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -787,7 +787,7 @@ int tipc_attach_loopback(struct net *net) if (!dev) return -ENODEV; - dev_hold(dev); + dev_hold_track(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL); tn->loopback_pt.dev = dev; tn->loopback_pt.type = htons(ETH_P_TIPC); tn->loopback_pt.func = tipc_loopback_rcv_pkt; @@ -800,7 +800,7 @@ void tipc_detach_loopback(struct net *net) struct tipc_net *tn = tipc_net(net); dev_remove_pack(&tn->loopback_pt); - dev_put(net->loopback_dev); + dev_put_track(net->loopback_dev, &tn->loopback_pt.dev_tracker); } /* Caller should hold rtnl_lock to protect the bearer */ diff --git a/net/tipc/link.c b/net/tipc/link.c index 09ae8448f394..8d9e09f48f4c 
100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1298,7 +1298,8 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, return false; #ifdef CONFIG_TIPC_CRYPTO case MSG_CRYPTO: - if (TIPC_SKB_CB(skb)->decrypted) { + if (sysctl_tipc_key_exchange_enabled && + TIPC_SKB_CB(skb)->decrypted) { tipc_crypto_msg_rcv(l->net, skb); return true; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 7404d094f2d1..3f271e29812f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk, memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, prot->iv_size + prot->salt_size); - xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq); + xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq); sge->offset += prot->prepend_size; sge->length -= prot->prepend_size; @@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, else memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size); - xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq); + xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq); /* Prepare AAD */ tls_make_aad(aad, rxm->full_len - prot->overhead_size + diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index c09bea89151b..01d44e2598e2 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -30,10 +30,6 @@ int __net_init unix_sysctl_register(struct net *net) if (table == NULL) goto err_alloc; - /* Don't export sysctls to unprivileged users */ - if (net->user_ns != &init_user_ns) - table[0].procname = NULL; - table[0].data = &net->unx.sysctl_max_dgram_qlen; net->unx.ctl = register_net_sysctl(net, "net/unix", table); if (net->unx.ctl == NULL) diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index 19189cf30a72..e111e13b6660 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -225,14 +225,20 @@ static size_t hvs_channel_writable_bytes(struct vmbus_channel *chan) return round_down(ret, 8); } +static int __hvs_send_data(struct vmbus_channel *chan, + struct vmpipe_proto_header *hdr, + size_t to_write) +{ + hdr->pkt_type = 1; + hdr->data_size = to_write; + return vmbus_sendpacket(chan, hdr, sizeof(*hdr) + to_write, + 0, VM_PKT_DATA_INBAND, 0); +} + static int hvs_send_data(struct vmbus_channel *chan, struct hvs_send_buf *send_buf, size_t to_write) { - send_buf->hdr.pkt_type = 1; - send_buf->hdr.data_size = to_write; - return vmbus_sendpacket(chan, &send_buf->hdr, - sizeof(send_buf->hdr) + to_write, - 0, VM_PKT_DATA_INBAND, 0); + return __hvs_send_data(chan, &send_buf->hdr, to_write); } static void hvs_channel_cb(void *ctx) @@ -468,7 +474,7 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode) return; /* It can't fail: see hvs_channel_writable_bytes(). 
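The __hvs_send_data() helper introduced above exists because the shutdown path (continued just below) sends a header-only FIN packet: casting a lone vmpipe_proto_header to struct hvs_send_buf implied a payload buffer that was never allocated. Factoring the send out lets the zero-payload caller pass the header it actually has. A simplified sketch of the shape, with the channel type and send primitive reduced to stand-ins:

#include <linux/types.h>

struct chan;                            /* stands in for vmbus_channel */
int chan_sendpacket(struct chan *c, void *buf, size_t len);

struct pipe_hdr {
        u32 pkt_type;
        u32 data_size;
};

/* payload bytes, if any, immediately follow the header */
static int send_pkt(struct chan *c, struct pipe_hdr *hdr, size_t to_write)
{
        hdr->pkt_type = 1;
        hdr->data_size = to_write;
        return chan_sendpacket(c, hdr, sizeof(*hdr) + to_write);
}

The FIN path can then call send_pkt(c, &hdr, 0) on a bare header without overstating the buffer's size.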
*/ - (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0); + (void)__hvs_send_data(hvs->chan, &hdr, 0); hvs->fin_sent = true; } diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 3f82b2f1e6dd..4f7c99dfd16c 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -731,7 +731,6 @@ static unsigned int features[] = { static struct virtio_driver virtio_vsock_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index e1c4197af468..b981a4828d08 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -41,7 +41,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) return 0; } - if (!more && x25->fraglen > 0) { /* End of fragment */ + if (x25->fraglen > 0) { /* End of fragment */ int len = x25->fraglen + skb->len; if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){ diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index e843b0d9e2a6..3fa066419d37 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -259,6 +259,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, } xso->dev = dev; + netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); xso->real_dev = dev; xso->num_exthdrs = 1; xso->flags = xuo->flags; @@ -269,7 +270,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xso->flags = 0; xso->dev = NULL; xso->real_dev = NULL; - dev_put(dev); + dev_put_track(dev, &xso->dev_tracker); if (err != -EOPNOTSUPP) return err; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index a886dff1ba89..38638845db9d 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -215,6 +215,11 @@ TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib endif TPROGS_LDLIBS += $(LIBBPF) -lelf -lz +TPROGLDLIBS_xdp_monitor += -lm +TPROGLDLIBS_xdp_redirect += -lm +TPROGLDLIBS_xdp_redirect_cpu += -lm +TPROGLDLIBS_xdp_redirect_map += -lm +TPROGLDLIBS_xdp_redirect_map_multi += -lm TPROGLDLIBS_tracex4 += -lrt TPROGLDLIBS_trace_output += -lrt TPROGLDLIBS_map_perf_test += -lrt @@ -328,7 +333,7 @@ $(BPF_SAMPLES_PATH)/*.c: verify_target_bpf $(LIBBPF) $(src)/*.c: verify_target_bpf $(LIBBPF) libbpf_hdrs: $(LIBBPF) -$(obj)/$(TRACE_HELPERS): | libbpf_hdrs +$(obj)/$(TRACE_HELPERS) $(obj)/$(CGROUP_HELPERS) $(obj)/$(XDP_SAMPLE): | libbpf_hdrs .PHONY: libbpf_hdrs @@ -343,6 +348,17 @@ $(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h $(obj)/hbm.o: $(src)/hbm.h $(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h +# Override includes for xdp_sample_user.o because $(srctree)/usr/include in +# TPROGS_CFLAGS causes conflicts +XDP_SAMPLE_CFLAGS += -Wall -O2 \ + -I$(src)/../../tools/include \ + -I$(src)/../../tools/include/uapi \ + -I$(LIBBPF_INCLUDE) \ + -I$(src)/../../tools/testing/selftests/bpf + +$(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS) +$(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h + -include $(BPF_SAMPLES_PATH)/Makefile.target VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux)) \ diff --git a/samples/bpf/Makefile.target b/samples/bpf/Makefile.target index 5a368affa038..7621f55e2947 100644 --- a/samples/bpf/Makefile.target +++ b/samples/bpf/Makefile.target @@ -73,14 +73,3 @@ quiet_cmd_tprog-cobjs = CC $@ cmd_tprog-cobjs = $(CC) $(tprogc_flags) -c -o $@ $< $(tprog-cobjs): $(obj)/%.o: $(src)/%.c FORCE $(call if_changed_dep,tprog-cobjs) - -# Override 
includes for xdp_sample_user.o because $(srctree)/usr/include in -# TPROGS_CFLAGS causes conflicts -XDP_SAMPLE_CFLAGS += -Wall -O2 -lm \ - -I./tools/include \ - -I./tools/include/uapi \ - -I./tools/lib \ - -I./tools/testing/selftests/bpf -$(obj)/xdp_sample_user.o: $(src)/xdp_sample_user.c \ - $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h - $(CC) $(XDP_SAMPLE_CFLAGS) -c -o $@ $< diff --git a/samples/bpf/cookie_uid_helper_example.c b/samples/bpf/cookie_uid_helper_example.c index 54958802c032..f0df3dda4b1f 100644 --- a/samples/bpf/cookie_uid_helper_example.c +++ b/samples/bpf/cookie_uid_helper_example.c @@ -67,8 +67,8 @@ static bool test_finish; static void maps_create(void) { - map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(uint32_t), - sizeof(struct stats), 100, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(uint32_t), + sizeof(struct stats), 100, NULL); if (map_fd < 0) error(1, errno, "map create failed!\n"); } @@ -157,9 +157,13 @@ static void prog_load(void) offsetof(struct __sk_buff, len)), BPF_EXIT_INSN(), }; - prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, - ARRAY_SIZE(prog), "GPL", 0, - log_buf, sizeof(log_buf)); + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = log_buf, + .log_size = sizeof(log_buf), + ); + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", + prog, ARRAY_SIZE(prog), &opts); if (prog_fd < 0) error(1, errno, "failed to load prog\n%s\n", log_buf); } diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c index 59f45fef5110..16dbf49e0f19 100644 --- a/samples/bpf/fds_example.c +++ b/samples/bpf/fds_example.c @@ -46,12 +46,6 @@ static void usage(void) printf(" -h Display this help.\n"); } -static int bpf_map_create(void) -{ - return bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(uint32_t), - sizeof(uint32_t), 1024, 0); -} - static int bpf_prog_create(const char *object) { static struct bpf_insn insns[] = { @@ -60,16 +54,22 @@ static int bpf_prog_create(const char *object) }; size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn); struct bpf_object *obj; - int prog_fd; + int err; if (object) { - assert(!bpf_prog_load(object, BPF_PROG_TYPE_UNSPEC, - &obj, &prog_fd)); - return prog_fd; + obj = bpf_object__open_file(object, NULL); + assert(!libbpf_get_error(obj)); + err = bpf_object__load(obj); + assert(!err); + return bpf_program__fd(bpf_object__next_program(obj, NULL)); } else { - return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, - insns, insns_cnt, "GPL", 0, - bpf_log_buf, BPF_LOG_BUF_SIZE); + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = bpf_log_buf, + .log_size = BPF_LOG_BUF_SIZE, + ); + + return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", + insns, insns_cnt, &opts); } } @@ -79,7 +79,8 @@ static int bpf_do_map(const char *file, uint32_t flags, uint32_t key, int fd, ret; if (flags & BPF_F_PIN) { - fd = bpf_map_create(); + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(uint32_t), + sizeof(uint32_t), 1024, NULL); printf("bpf: map fd:%d (%s)\n", fd, strerror(errno)); assert(fd > 0); diff --git a/samples/bpf/lwt_len_hist_kern.c b/samples/bpf/lwt_len_hist_kern.c index 9ed63e10e170..1fa14c54963a 100644 --- a/samples/bpf/lwt_len_hist_kern.c +++ b/samples/bpf/lwt_len_hist_kern.c @@ -16,13 +16,6 @@ #include <uapi/linux/in.h> #include <bpf/bpf_helpers.h> -# define printk(fmt, ...) 
\ - ({ \ - char ____fmt[] = fmt; \ - bpf_trace_printk(____fmt, sizeof(____fmt), \ - ##__VA_ARGS__); \ - }) - struct bpf_elf_map { __u32 type; __u32 size_key; diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c index 9db949290a78..319fd31522f3 100644 --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -134,19 +134,22 @@ static void do_test_lru(enum test_type test, int cpu) */ int outer_fd = map_fd[array_of_lru_hashs_idx]; unsigned int mycpu, mynode; + LIBBPF_OPTS(bpf_map_create_opts, opts, + .map_flags = BPF_F_NUMA_NODE, + ); assert(cpu < MAX_NR_CPUS); ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); assert(!ret); + opts.numa_node = mynode; inner_lru_map_fds[cpu] = - bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, - test_map_names[INNER_LRU_HASH_PREALLOC], - sizeof(uint32_t), - sizeof(long), - inner_lru_hash_size, 0, - mynode); + bpf_map_create(BPF_MAP_TYPE_LRU_HASH, + test_map_names[INNER_LRU_HASH_PREALLOC], + sizeof(uint32_t), + sizeof(long), + inner_lru_hash_size, &opts); if (inner_lru_map_fds[cpu] == -1) { printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", strerror(errno), errno); diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c index 23d1930e1927..a88f69504c08 100644 --- a/samples/bpf/sock_example.c +++ b/samples/bpf/sock_example.c @@ -37,8 +37,8 @@ static int test_sock(void) int sock = -1, map_fd, prog_fd, i, key; long long value = 0, tcp_cnt, udp_cnt, icmp_cnt; - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), - 256, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(value), + 256, NULL); if (map_fd < 0) { printf("failed to create map '%s'\n", strerror(errno)); goto cleanup; @@ -59,9 +59,13 @@ static int test_sock(void) BPF_EXIT_INSN(), }; size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = bpf_log_buf, + .log_size = BPF_LOG_BUF_SIZE, + ); - prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, insns_cnt, - "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); + prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", + prog, insns_cnt, &opts); if (prog_fd < 0) { printf("failed to load prog '%s'\n", strerror(errno)); goto cleanup; diff --git a/samples/bpf/sockex1_user.c b/samples/bpf/sockex1_user.c index 3c83722877dc..9e8d39e245c1 100644 --- a/samples/bpf/sockex1_user.c +++ b/samples/bpf/sockex1_user.c @@ -11,17 +11,26 @@ int main(int ac, char **argv) { struct bpf_object *obj; + struct bpf_program *prog; int map_fd, prog_fd; char filename[256]; - int i, sock; + int i, sock, err; FILE *f; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - if (bpf_prog_load(filename, BPF_PROG_TYPE_SOCKET_FILTER, - &obj, &prog_fd)) + obj = bpf_object__open_file(filename, NULL); + if (libbpf_get_error(obj)) return 1; + prog = bpf_object__next_program(obj, NULL); + bpf_program__set_type(prog, BPF_PROG_TYPE_SOCKET_FILTER); + + err = bpf_object__load(obj); + if (err) + return 1; + + prog_fd = bpf_program__fd(prog); map_fd = bpf_object__find_map_fd_by_name(obj, "my_map"); sock = open_raw_sock("lo"); diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c index bafa567b840c..6a3fd369d3fc 100644 --- a/samples/bpf/sockex2_user.c +++ b/samples/bpf/sockex2_user.c @@ -16,18 +16,26 @@ struct pair { int main(int ac, char **argv) { + struct bpf_program *prog; struct bpf_object *obj; int map_fd, prog_fd; char filename[256]; - int i, sock; + int i, sock, err; FILE *f; snprintf(filename, sizeof(filename), 
"%s_kern.o", argv[0]); + obj = bpf_object__open_file(filename, NULL); + if (libbpf_get_error(obj)) + return 1; + + prog = bpf_object__next_program(obj, NULL); + bpf_program__set_type(prog, BPF_PROG_TYPE_SOCKET_FILTER); - if (bpf_prog_load(filename, BPF_PROG_TYPE_SOCKET_FILTER, - &obj, &prog_fd)) + err = bpf_object__load(obj); + if (err) return 1; + prog_fd = bpf_program__fd(prog); map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map"); sock = open_raw_sock("lo"); diff --git a/samples/bpf/test_cgrp2_array_pin.c b/samples/bpf/test_cgrp2_array_pin.c index 6d564aa75447..05e88aa63009 100644 --- a/samples/bpf/test_cgrp2_array_pin.c +++ b/samples/bpf/test_cgrp2_array_pin.c @@ -64,9 +64,9 @@ int main(int argc, char **argv) } if (create_array) { - array_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_ARRAY, + array_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_ARRAY, NULL, sizeof(uint32_t), sizeof(uint32_t), - 1, 0); + 1, NULL); if (array_fd < 0) { fprintf(stderr, "bpf_create_map(BPF_MAP_TYPE_CGROUP_ARRAY,...): %s(%d)\n", diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c index 390ff38d2ac6..6d90874b09c3 100644 --- a/samples/bpf/test_cgrp2_attach.c +++ b/samples/bpf/test_cgrp2_attach.c @@ -71,10 +71,13 @@ static int prog_load(int map_fd, int verdict) BPF_EXIT_INSN(), }; size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = bpf_log_buf, + .log_size = BPF_LOG_BUF_SIZE, + ); - return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, - prog, insns_cnt, "GPL", 0, - bpf_log_buf, BPF_LOG_BUF_SIZE); + return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB, NULL, "GPL", + prog, insns_cnt, &opts); } static int usage(const char *argv0) @@ -90,9 +93,9 @@ static int attach_filter(int cg_fd, int type, int verdict) int prog_fd, map_fd, ret, key; long long pkt_cnt, byte_cnt; - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(byte_cnt), - 256, 0); + 256, NULL); if (map_fd < 0) { printf("Failed to create map: '%s'\n", strerror(errno)); return EXIT_FAILURE; diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c index b0811da5a00f..a0811df888f4 100644 --- a/samples/bpf/test_cgrp2_sock.c +++ b/samples/bpf/test_cgrp2_sock.c @@ -70,6 +70,10 @@ static int prog_load(__u32 idx, __u32 mark, __u32 prio) BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, priority)), BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, priority)), }; + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = bpf_log_buf, + .log_size = BPF_LOG_BUF_SIZE, + ); struct bpf_insn *prog; size_t insns_cnt; @@ -115,8 +119,8 @@ static int prog_load(__u32 idx, __u32 mark, __u32 prio) insns_cnt /= sizeof(struct bpf_insn); - ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SOCK, prog, insns_cnt, - "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); + ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", + prog, insns_cnt, &opts); free(prog); diff --git a/samples/bpf/test_lru_dist.c b/samples/bpf/test_lru_dist.c index c92c5c06b965..75e877853596 100644 --- a/samples/bpf/test_lru_dist.c +++ b/samples/bpf/test_lru_dist.c @@ -105,10 +105,10 @@ struct pfect_lru { static void pfect_lru_init(struct pfect_lru *lru, unsigned int lru_size, unsigned int nr_possible_elems) { - lru->map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, + lru->map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(unsigned long long), sizeof(struct pfect_lru_node *), - nr_possible_elems, 0); + nr_possible_elems, NULL); assert(lru->map_fd != -1); 
lru->free_nodes = malloc(lru_size * sizeof(struct pfect_lru_node)); @@ -207,10 +207,13 @@ static unsigned int read_keys(const char *dist_file, static int create_map(int map_type, int map_flags, unsigned int size) { + LIBBPF_OPTS(bpf_map_create_opts, opts, + .map_flags = map_flags, + ); int map_fd; - map_fd = bpf_create_map(map_type, sizeof(unsigned long long), - sizeof(unsigned long long), size, map_flags); + map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long), + sizeof(unsigned long long), size, &opts); if (map_fd == -1) perror("bpf_create_map"); diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c index 364b98764d54..371732f9cf8e 100644 --- a/samples/bpf/trace_output_user.c +++ b/samples/bpf/trace_output_user.c @@ -43,7 +43,6 @@ static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size) int main(int argc, char **argv) { - struct perf_buffer_opts pb_opts = {}; struct bpf_link *link = NULL; struct bpf_program *prog; struct perf_buffer *pb; @@ -84,8 +83,7 @@ int main(int argc, char **argv) goto cleanup; } - pb_opts.sample_cb = print_bpf_output; - pb = perf_buffer__new(map_fd, 8, &pb_opts); + pb = perf_buffer__new(map_fd, 8, print_bpf_output, NULL, NULL, NULL); ret = libbpf_get_error(pb); if (ret) { printf("failed to setup perf_buffer: %d\n", ret); diff --git a/samples/bpf/xdp_redirect_cpu.bpf.c b/samples/bpf/xdp_redirect_cpu.bpf.c index f10fe3cf25f6..25e3a405375f 100644 --- a/samples/bpf/xdp_redirect_cpu.bpf.c +++ b/samples/bpf/xdp_redirect_cpu.bpf.c @@ -100,7 +100,6 @@ u16 get_dest_port_ipv4_udp(struct xdp_md *ctx, u64 nh_off) void *data = (void *)(long)ctx->data; struct iphdr *iph = data + nh_off; struct udphdr *udph; - u16 dport; if (iph + 1 > data_end) return 0; @@ -111,8 +110,7 @@ u16 get_dest_port_ipv4_udp(struct xdp_md *ctx, u64 nh_off) if (udph + 1 > data_end) return 0; - dport = bpf_ntohs(udph->dest); - return dport; + return bpf_ntohs(udph->dest); } static __always_inline diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c index f4382ccdcbb1..587eacb49103 100644 --- a/samples/bpf/xdp_sample_pkts_user.c +++ b/samples/bpf/xdp_sample_pkts_user.c @@ -110,12 +110,9 @@ static void usage(const char *prog) int main(int argc, char **argv) { - struct bpf_prog_load_attr prog_load_attr = { - .prog_type = BPF_PROG_TYPE_XDP, - }; - struct perf_buffer_opts pb_opts = {}; const char *optstr = "FS"; int prog_fd, map_fd, opt; + struct bpf_program *prog; struct bpf_object *obj; struct bpf_map *map; char filename[256]; @@ -144,15 +141,19 @@ int main(int argc, char **argv) } snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - prog_load_attr.file = filename; - if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + obj = bpf_object__open_file(filename, NULL); + if (libbpf_get_error(obj)) return 1; - if (!prog_fd) { - printf("bpf_prog_load_xattr: %s\n", strerror(errno)); + prog = bpf_object__next_program(obj, NULL); + bpf_program__set_type(prog, BPF_PROG_TYPE_XDP); + + err = bpf_object__load(obj); + if (err) return 1; - } + + prog_fd = bpf_program__fd(prog); map = bpf_object__next_map(obj, NULL); if (!map) { @@ -181,8 +182,7 @@ int main(int argc, char **argv) return 1; } - pb_opts.sample_cb = print_bpf_output; - pb = perf_buffer__new(map_fd, 8, &pb_opts); + pb = perf_buffer__new(map_fd, 8, print_bpf_output, NULL, NULL, NULL); err = libbpf_get_error(pb); if (err) { perror("perf_buffer setup failed"); diff --git a/samples/bpf/xdp_sample_user.h b/samples/bpf/xdp_sample_user.h index 
d97465ff8c62..5f44b877ecf5 100644 --- a/samples/bpf/xdp_sample_user.h +++ b/samples/bpf/xdp_sample_user.h @@ -45,7 +45,9 @@ const char *get_driver_name(int ifindex); int get_mac_addr(int ifindex, void *mac_addr); #pragma GCC diagnostic push +#ifndef __clang__ #pragma GCC diagnostic ignored "-Wstringop-truncation" +#endif __attribute__((unused)) static inline char *safe_strncpy(char *dst, const char *src, size_t size) { diff --git a/samples/bpf/xdpsock_ctrl_proc.c b/samples/bpf/xdpsock_ctrl_proc.c index 384e62e3c6d6..cc4408797ab7 100644 --- a/samples/bpf/xdpsock_ctrl_proc.c +++ b/samples/bpf/xdpsock_ctrl_proc.c @@ -15,6 +15,9 @@ #include <bpf/xsk.h> #include "xdpsock.h" +/* libbpf APIs for AF_XDP are deprecated starting from v0.7 */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + static const char *opt_if = ""; static struct option long_options[] = { diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 49d7a6ad7e39..616d663d55aa 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -36,6 +36,9 @@ #include <bpf/bpf.h> #include "xdpsock.h" +/* libbpf APIs for AF_XDP are deprecated starting from v0.7 */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #ifndef SOL_XDP #define SOL_XDP 283 #endif diff --git a/samples/bpf/xsk_fwd.c b/samples/bpf/xsk_fwd.c index 1cd97c84c337..52e7c4ffd228 100644 --- a/samples/bpf/xsk_fwd.c +++ b/samples/bpf/xsk_fwd.c @@ -27,6 +27,9 @@ #include <bpf/xsk.h> #include <bpf/bpf.h> +/* libbpf APIs for AF_XDP are deprecated starting from v0.7 */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) typedef __u64 u64; diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index 10a0bffc3cf6..4208fa8a4db5 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -252,6 +252,11 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = 0x02c8, }, + { + .flags = FLAG_SOF, + .device = 0x02c8, + .codec_hid = "ESSX8336", + }, /* Cometlake-H */ { .flags = FLAG_SOF, @@ -276,6 +281,11 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = 0x06c8, }, + { + .flags = FLAG_SOF, + .device = 0x06c8, + .codec_hid = "ESSX8336", + }, #endif /* Icelake */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index fe51163f2d82..1b46b599a5cf 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -335,7 +335,10 @@ enum { ((pci)->device == 0x0c0c) || \ ((pci)->device == 0x0d0c) || \ ((pci)->device == 0x160c) || \ - ((pci)->device == 0x490d)) + ((pci)->device == 0x490d) || \ + ((pci)->device == 0x4f90) || \ + ((pci)->device == 0x4f91) || \ + ((pci)->device == 0x4f92)) #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) @@ -2473,6 +2476,13 @@ static const struct pci_device_id azx_ids[] = { /* DG1 */ { PCI_DEVICE(0x8086, 0x490d), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* DG2 */ + { PCI_DEVICE(0x8086, 0x4f90), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4f91), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4f92), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Alderlake-S */ { PCI_DEVICE(0x8086, 0x7ad0), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 
ea8ab8b43337..d22c96eb2f8f 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -438,6 +438,15 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid, #define for_each_hda_codec_node(nid, codec) \ for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++) +/* Set the codec power_state flag to indicate to allow unsol event handling; + * see hda_codec_unsol_event() in hda_bind.c. Calling this might confuse the + * state tracking, so use with care. + */ +static inline void snd_hda_codec_allow_unsol_events(struct hda_codec *codec) +{ + codec->core.dev.power.power_state = PMSG_ON; +} + /* * get widget capabilities */ diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c index 31ff11ab868e..039b9f2f8e94 100644 --- a/sound/pci/hda/patch_cs8409.c +++ b/sound/pci/hda/patch_cs8409.c @@ -750,6 +750,11 @@ static void cs42l42_resume(struct sub_codec *cs42l42) if (cs42l42->full_scale_vol) cs8409_i2c_write(cs42l42, 0x2001, 0x01); + /* we have to explicitly allow unsol event handling even during the + * resume phase so that the jack event is processed properly + */ + snd_hda_codec_allow_unsol_events(cs42l42->codec); + cs42l42_enable_jack_detect(cs42l42); } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 65d2c5539919..415701bd10ac 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4380,10 +4380,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi), -HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi), +HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi), +HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c index 90a921f726c3..3fa99741779a 100644 --- a/sound/soc/codecs/cs35l41-spi.c +++ b/sound/soc/codecs/cs35l41-spi.c @@ -42,34 +42,6 @@ static const struct spi_device_id cs35l41_id_spi[] = { MODULE_DEVICE_TABLE(spi, cs35l41_id_spi); -static void cs35l41_spi_otp_setup(struct cs35l41_private *cs35l41, - bool is_pre_setup, unsigned int *freq) -{ - struct spi_device *spi; - u32 orig_spi_freq; - - spi = to_spi_device(cs35l41->dev); - - if (!spi) { - dev_err(cs35l41->dev, "%s: No SPI device\n", __func__); - return; - } - - if (is_pre_setup) { - orig_spi_freq = spi->max_speed_hz; - if (orig_spi_freq > CS35L41_SPI_MAX_FREQ_OTP) { - spi->max_speed_hz = CS35L41_SPI_MAX_FREQ_OTP; - spi_setup(spi); - } - *freq = orig_spi_freq; - } else { - if (spi->max_speed_hz != *freq) { - spi->max_speed_hz = *freq; - spi_setup(spi); - } - } -} - static int cs35l41_spi_probe(struct spi_device *spi) { const struct regmap_config *regmap_config = &cs35l41_regmap_spi; @@ -81,6 +53,9 @@ static int cs35l41_spi_probe(struct spi_device *spi) if (!cs35l41) return -ENOMEM; + spi->max_speed_hz = CS35L41_SPI_MAX_FREQ; + spi_setup(spi); + spi_set_drvdata(spi, cs35l41); 
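/*
 * With the otp_setup() callback gone (see the cs35l41.c and cs35l41.h hunks
 * below), the SPI bus clock is now capped once at probe time, above, to
 * CS35L41_SPI_MAX_FREQ (4 MHz) before any regmap I/O happens, rather than
 * being lowered to the OTP-safe rate and restored around each OTP read.
 */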
cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config); if (IS_ERR(cs35l41->regmap)) { @@ -91,7 +66,6 @@ static int cs35l41_spi_probe(struct spi_device *spi) cs35l41->dev = &spi->dev; cs35l41->irq = spi->irq; - cs35l41->otp_setup = cs35l41_spi_otp_setup; return cs35l41_probe(cs35l41, pdata); } diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c index 9d0530dde996..9c4d481f7614 100644 --- a/sound/soc/codecs/cs35l41.c +++ b/sound/soc/codecs/cs35l41.c @@ -302,7 +302,6 @@ static int cs35l41_otp_unpack(void *data) const struct cs35l41_otp_packed_element_t *otp_map; struct cs35l41_private *cs35l41 = data; int bit_offset, word_offset, ret, i; - unsigned int orig_spi_freq; unsigned int bit_sum = 8; u32 otp_val, otp_id_reg; u32 *otp_mem; @@ -326,9 +325,6 @@ static int cs35l41_otp_unpack(void *data) goto err_otp_unpack; } - if (cs35l41->otp_setup) - cs35l41->otp_setup(cs35l41, true, &orig_spi_freq); - ret = regmap_bulk_read(cs35l41->regmap, CS35L41_OTP_MEM0, otp_mem, CS35L41_OTP_SIZE_WORDS); if (ret < 0) { @@ -336,9 +332,6 @@ static int cs35l41_otp_unpack(void *data) goto err_otp_unpack; } - if (cs35l41->otp_setup) - cs35l41->otp_setup(cs35l41, false, &orig_spi_freq); - otp_map = otp_map_match->map; bit_offset = otp_map_match->bit_offset; diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h index 6cffe8a55beb..48485b08a6f1 100644 --- a/sound/soc/codecs/cs35l41.h +++ b/sound/soc/codecs/cs35l41.h @@ -726,7 +726,7 @@ #define CS35L41_FS2_WINDOW_MASK 0x00FFF800 #define CS35L41_FS2_WINDOW_SHIFT 12 -#define CS35L41_SPI_MAX_FREQ_OTP 4000000 +#define CS35L41_SPI_MAX_FREQ 4000000 #define CS35L41_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) #define CS35L41_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) @@ -764,8 +764,6 @@ struct cs35l41_private { int irq; /* GPIO for /RST */ struct gpio_desc *reset_gpio; - void (*otp_setup)(struct cs35l41_private *cs35l41, bool is_pre_setup, - unsigned int *freq); }; int cs35l41_probe(struct cs35l41_private *cs35l41, diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c index 943d7d933e81..03f24edfe4f6 100644 --- a/sound/soc/codecs/rk817_codec.c +++ b/sound/soc/codecs/rk817_codec.c @@ -539,3 +539,4 @@ module_platform_driver(rk817_codec_driver); MODULE_DESCRIPTION("ASoC RK817 codec driver"); MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:rk817-codec"); diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c index b4eb0c97edf1..4eebc79d4b48 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c @@ -81,6 +81,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = { .sof_fw_filename = "sof-cml.ri", .sof_tplg_filename = "sof-cml-da7219-max98390.tplg", }, + { + .id = "ESSX8336", + .drv_name = "sof-essx8336", + .sof_fw_filename = "sof-cml.ri", + .sof_tplg_filename = "sof-cml-es8336.tplg", + }, {}, }; EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines); diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c index 2ae99b49d3f5..cbd7ea48837b 100644 --- a/sound/soc/soc-acpi.c +++ b/sound/soc/soc-acpi.c @@ -20,8 +20,10 @@ static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine) if (comp_ids) { for (i = 0; i < comp_ids->num_codecs; i++) { - if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) + if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) { + strscpy(machine->id, comp_ids->codecs[i], 
ACPI_ID_LEN); return true; + } } } diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 568d351b7a4e..2c0d4d06ab36 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -58,6 +58,13 @@ int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w) return -EINVAL; } + /* DAI already configured, reset it before reconfiguring it */ + if (sof_dai->configured) { + ret = hda_ctrl_dai_widget_free(w); + if (ret < 0) + return ret; + } + config = &sof_dai->dai_config[sof_dai->current_config]; /* diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c index 8ee9a77bd83d..a74c980ee775 100644 --- a/sound/soc/tegra/tegra186_dspk.c +++ b/sound/soc/tegra/tegra186_dspk.c @@ -26,51 +26,162 @@ static const struct reg_default tegra186_dspk_reg_defaults[] = { { TEGRA186_DSPK_CODEC_CTRL, 0x03000000 }, }; -static int tegra186_dspk_get_control(struct snd_kcontrol *kcontrol, +static int tegra186_dspk_get_fifo_th(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); - if (strstr(kcontrol->id.name, "FIFO Threshold")) - ucontrol->value.integer.value[0] = dspk->rx_fifo_th; - else if (strstr(kcontrol->id.name, "OSR Value")) - ucontrol->value.integer.value[0] = dspk->osr_val; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - ucontrol->value.integer.value[0] = dspk->lrsel; - else if (strstr(kcontrol->id.name, "Channel Select")) - ucontrol->value.integer.value[0] = dspk->ch_sel; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - ucontrol->value.integer.value[0] = dspk->mono_to_stereo; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - ucontrol->value.integer.value[0] = dspk->stereo_to_mono; + ucontrol->value.integer.value[0] = dspk->rx_fifo_th; return 0; } -static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol, +static int tegra186_dspk_put_fifo_th(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); - int val = ucontrol->value.integer.value[0]; - - if (strstr(kcontrol->id.name, "FIFO Threshold")) - dspk->rx_fifo_th = val; - else if (strstr(kcontrol->id.name, "OSR Value")) - dspk->osr_val = val; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - dspk->lrsel = val; - else if (strstr(kcontrol->id.name, "Channel Select")) - dspk->ch_sel = val; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - dspk->mono_to_stereo = val; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - dspk->stereo_to_mono = val; + int value = ucontrol->value.integer.value[0]; + + if (value == dspk->rx_fifo_th) + return 0; + + dspk->rx_fifo_th = value; + + return 1; +} + +static int tegra186_dspk_get_osr_val(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->osr_val; return 0; } +static int tegra186_dspk_put_osr_val(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->osr_val) + 
return 0; + + dspk->osr_val = value; + + return 1; +} + +static int tegra186_dspk_get_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->lrsel; + + return 0; +} + +static int tegra186_dspk_put_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->lrsel) + return 0; + + dspk->lrsel = value; + + return 1; +} + +static int tegra186_dspk_get_ch_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->ch_sel; + + return 0; +} + +static int tegra186_dspk_put_ch_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->ch_sel) + return 0; + + dspk->ch_sel = value; + + return 1; +} + +static int tegra186_dspk_get_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->mono_to_stereo; + + return 0; +} + +static int tegra186_dspk_put_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->mono_to_stereo) + return 0; + + dspk->mono_to_stereo = value; + + return 1; +} + +static int tegra186_dspk_get_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->stereo_to_mono; + + return 0; +} + +static int tegra186_dspk_put_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->stereo_to_mono) + return 0; + + dspk->stereo_to_mono = value; + + return 1; +} + static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev) { struct tegra186_dspk *dspk = dev_get_drvdata(dev); @@ -279,17 +390,19 @@ static const struct soc_enum tegra186_dspk_lrsel_enum = static const struct snd_kcontrol_new tegrat186_dspk_controls[] = { SOC_SINGLE_EXT("FIFO Threshold", SND_SOC_NOPM, 0, TEGRA186_DSPK_RX_FIFO_DEPTH - 1, 0, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_fifo_th, tegra186_dspk_put_fifo_th), SOC_ENUM_EXT("OSR Value", 
tegra186_dspk_osr_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_osr_val, tegra186_dspk_put_osr_val), SOC_ENUM_EXT("LR Polarity Select", tegra186_dspk_lrsel_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_pol_sel, tegra186_dspk_put_pol_sel), SOC_ENUM_EXT("Channel Select", tegra186_dspk_ch_sel_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_ch_sel, tegra186_dspk_put_ch_sel), SOC_ENUM_EXT("Mono To Stereo", tegra186_dspk_mono_conv_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_mono_to_stereo, + tegra186_dspk_put_mono_to_stereo), SOC_ENUM_EXT("Stereo To Mono", tegra186_dspk_stereo_conv_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_stereo_to_mono, + tegra186_dspk_put_stereo_to_mono), }; static const struct snd_soc_component_driver tegra186_dspk_cmpnt = { diff --git a/sound/soc/tegra/tegra210_admaif.c b/sound/soc/tegra/tegra210_admaif.c index bcccdf3ddc52..1a2e868a6220 100644 --- a/sound/soc/tegra/tegra210_admaif.c +++ b/sound/soc/tegra/tegra210_admaif.c @@ -424,46 +424,122 @@ static const struct snd_soc_dai_ops tegra_admaif_dai_ops = { .trigger = tegra_admaif_trigger, }; -static int tegra_admaif_get_control(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_admaif_pget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + + ucontrol->value.enumerated.item[0] = + admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg]; + + return 0; +} + +static int tegra210_admaif_pput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg]) + return 0; + + admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value; + + return 1; +} + +static int tegra210_admaif_cget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + + ucontrol->value.enumerated.item[0] = + admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg]; + + return 0; +} + +static int tegra210_admaif_cput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg]) + return 0; + + admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value; + + return 1; +} + +static int tegra210_admaif_pget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct 
tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); - long *uctl_val = &ucontrol->value.integer.value[0]; + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; - if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) - *uctl_val = admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg]; - else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) - *uctl_val = admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg]; - else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) - *uctl_val = admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg]; - else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) - *uctl_val = admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg]; + ucontrol->value.enumerated.item[0] = + admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg]; return 0; } -static int tegra_admaif_put_control(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_admaif_pput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg]) + return 0; + + admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value; + + return 1; +} + +static int tegra210_admaif_cget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); - int value = ucontrol->value.integer.value[0]; + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; - if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) - admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value; - else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) - admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value; - else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) - admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value; - else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) - admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value; + ucontrol->value.enumerated.item[0] = + admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg]; return 0; } +static int tegra210_admaif_cput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg]) + return 0; + + admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value; + + return 1; +} + static int tegra_admaif_dai_probe(struct snd_soc_dai *dai) { struct tegra_admaif *admaif = snd_soc_dai_get_drvdata(dai); @@ -559,17 +635,21 @@ static const char * const tegra_admaif_mono_conv_text[] = { } #define TEGRA_ADMAIF_CIF_CTRL(reg) \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,\ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1, \ + tegra210_admaif_pget_mono_to_stereo, \ + tegra210_admaif_pput_mono_to_stereo, \ tegra_admaif_mono_conv_text), \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " 
Playback Stereo To Mono", reg - 1,\ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1, \ + tegra210_admaif_pget_stereo_to_mono, \ + tegra210_admaif_pput_stereo_to_mono, \ tegra_admaif_stereo_conv_text), \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \ + tegra210_admaif_cget_mono_to_stereo, \ + tegra210_admaif_cput_mono_to_stereo, \ tegra_admaif_mono_conv_text), \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \ + tegra210_admaif_cget_stereo_to_mono, \ + tegra210_admaif_cput_stereo_to_mono, \ tegra_admaif_stereo_conv_text) static struct snd_kcontrol_new tegra210_admaif_controls[] = { diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c index d7c7849c2f92..933c4503fe50 100644 --- a/sound/soc/tegra/tegra210_adx.c +++ b/sound/soc/tegra/tegra210_adx.c @@ -193,6 +193,9 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol, struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value;; + if (value == bytes_map[mc->reg]) + return 0; + if (value >= 0 && value <= 255) { /* update byte map and enable slot */ bytes_map[mc->reg] = value; diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c index a1989eae2b52..388b815443c7 100644 --- a/sound/soc/tegra/tegra210_ahub.c +++ b/sound/soc/tegra/tegra210_ahub.c @@ -62,6 +62,7 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl, unsigned int *item = uctl->value.enumerated.item; unsigned int value = e->values[item[0]]; unsigned int i, bit_pos, reg_idx = 0, reg_val = 0; + int change = 0; if (item[0] >= e->items) return -EINVAL; @@ -86,12 +87,14 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl, /* Update widget power if state has changed */ if (snd_soc_component_test_bits(cmpnt, update[i].reg, - update[i].mask, update[i].val)) - snd_soc_dapm_mux_update_power(dapm, kctl, item[0], e, - &update[i]); + update[i].mask, + update[i].val)) + change |= snd_soc_dapm_mux_update_power(dapm, kctl, + item[0], e, + &update[i]); } - return 0; + return change; } static struct snd_soc_dai_driver tegra210_ahub_dais[] = { diff --git a/sound/soc/tegra/tegra210_amx.c b/sound/soc/tegra/tegra210_amx.c index af9bddfc3120..689576302ede 100644 --- a/sound/soc/tegra/tegra210_amx.c +++ b/sound/soc/tegra/tegra210_amx.c @@ -222,6 +222,9 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol, int reg = mc->reg; int value = ucontrol->value.integer.value[0]; + if (value == bytes_map[reg]) + return 0; + if (value >= 0 && value <= 255) { /* Update byte map and enable slot */ bytes_map[reg] = value; diff --git a/sound/soc/tegra/tegra210_dmic.c b/sound/soc/tegra/tegra210_dmic.c index b096478cd2ef..db95794530f4 100644 --- a/sound/soc/tegra/tegra210_dmic.c +++ b/sound/soc/tegra/tegra210_dmic.c @@ -156,51 +156,162 @@ static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream, return 0; } -static int tegra210_dmic_get_control(struct snd_kcontrol *kcontrol, +static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = 
snd_soc_component_get_drvdata(comp); + + ucontrol->value.integer.value[0] = dmic->boost_gain; + + return 0; +} + +static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + int value = ucontrol->value.integer.value[0]; + + if (value == dmic->boost_gain) + return 0; + + dmic->boost_gain = value; + + return 1; +} + +static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->ch_select; + + return 0; +} + +static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->ch_select) + return 0; + + dmic->ch_select = value; + + return 1; +} + +static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo; + + return 0; +} + +static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->mono_to_stereo) + return 0; + + dmic->mono_to_stereo = value; + + return 1; +} + +static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono; + + return 0; +} + +static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->stereo_to_mono) + return 0; + + dmic->stereo_to_mono = value; + + return 1; +} + +static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); - if (strstr(kcontrol->id.name, "Boost Gain Volume")) - ucontrol->value.integer.value[0] = dmic->boost_gain; - else if (strstr(kcontrol->id.name, "Channel Select")) - ucontrol->value.integer.value[0] = dmic->ch_select; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - ucontrol->value.integer.value[0] = dmic->mono_to_stereo; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - ucontrol->value.integer.value[0] = dmic->stereo_to_mono; - else if (strstr(kcontrol->id.name, "OSR Value")) - 
ucontrol->value.integer.value[0] = dmic->osr_val; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - ucontrol->value.integer.value[0] = dmic->lrsel; + ucontrol->value.enumerated.item[0] = dmic->osr_val; return 0; } -static int tegra210_dmic_put_control(struct snd_kcontrol *kcontrol, +static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); - int value = ucontrol->value.integer.value[0]; + unsigned int value = ucontrol->value.enumerated.item[0]; - if (strstr(kcontrol->id.name, "Boost Gain Volume")) - dmic->boost_gain = value; - else if (strstr(kcontrol->id.name, "Channel Select")) - dmic->ch_select = ucontrol->value.integer.value[0]; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - dmic->mono_to_stereo = value; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - dmic->stereo_to_mono = value; - else if (strstr(kcontrol->id.name, "OSR Value")) - dmic->osr_val = value; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - dmic->lrsel = value; + if (value == dmic->osr_val) + return 0; + + dmic->osr_val = value; + + return 1; +} + +static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->lrsel; return 0; } +static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->lrsel) + return 0; + + dmic->lrsel = value; + + return 1; +} + static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = { .hw_params = tegra210_dmic_hw_params, }; @@ -287,19 +398,22 @@ static const struct soc_enum tegra210_dmic_lrsel_enum = static const struct snd_kcontrol_new tegra210_dmic_controls[] = { SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_boost_gain, + tegra210_dmic_put_boost_gain), SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select), SOC_ENUM_EXT("Mono To Stereo", - tegra210_dmic_mono_conv_enum, tegra210_dmic_get_control, - tegra210_dmic_put_control), + tegra210_dmic_mono_conv_enum, + tegra210_dmic_get_mono_to_stereo, + tegra210_dmic_put_mono_to_stereo), SOC_ENUM_EXT("Stereo To Mono", - tegra210_dmic_stereo_conv_enum, tegra210_dmic_get_control, - tegra210_dmic_put_control), + tegra210_dmic_stereo_conv_enum, + tegra210_dmic_get_stereo_to_mono, + tegra210_dmic_put_stereo_to_mono), SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val), SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel), }; static const struct snd_soc_component_driver tegra210_dmic_compnt = { diff --git a/sound/soc/tegra/tegra210_i2s.c b/sound/soc/tegra/tegra210_i2s.c index 45f31ccb49d8..9552bbb939dd 
100644 --- a/sound/soc/tegra/tegra210_i2s.c +++ b/sound/soc/tegra/tegra210_i2s.c @@ -302,85 +302,235 @@ static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai, return 0; } -static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai, - unsigned int ratio) +static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { - struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai); + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); - i2s->bclk_ratio = ratio; + ucontrol->value.integer.value[0] = i2s->loopback; return 0; } -static int tegra210_i2s_get_control(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + int value = ucontrol->value.integer.value[0]; + + if (value == i2s->loopback) + return 0; + + i2s->loopback = value; + + regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK, + i2s->loopback << I2S_CTRL_LPBK_SHIFT); + + return 1; +} + +static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); - long *uctl_val = &ucontrol->value.integer.value[0]; - - if (strstr(kcontrol->id.name, "Loopback")) - *uctl_val = i2s->loopback; - else if (strstr(kcontrol->id.name, "FSYNC Width")) - *uctl_val = i2s->fsync_width; - else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) - *uctl_val = i2s->stereo_to_mono[I2S_TX_PATH]; - else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) - *uctl_val = i2s->mono_to_stereo[I2S_TX_PATH]; - else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) - *uctl_val = i2s->stereo_to_mono[I2S_RX_PATH]; - else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) - *uctl_val = i2s->mono_to_stereo[I2S_RX_PATH]; - else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) - *uctl_val = i2s->rx_fifo_th; - else if (strstr(kcontrol->id.name, "BCLK Ratio")) - *uctl_val = i2s->bclk_ratio; + + ucontrol->value.integer.value[0] = i2s->fsync_width; return 0; } -static int tegra210_i2s_put_control(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); int value = ucontrol->value.integer.value[0]; - if (strstr(kcontrol->id.name, "Loopback")) { - i2s->loopback = value; + if (value == i2s->fsync_width) + return 0; - regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, - I2S_CTRL_LPBK_MASK, - i2s->loopback << I2S_CTRL_LPBK_SHIFT); + i2s->fsync_width = value; - } else if (strstr(kcontrol->id.name, "FSYNC Width")) { - /* - * Frame sync width is used only for FSYNC modes and not - * applicable for LRCK modes. Reset value for this field is "0", - * which means the width is one bit clock wide. - * The width requirement may depend on the codec and in such - * cases mixer control is used to update custom values. A value - * of "N" here means, width is "N + 1" bit clock wide. 
- */ - i2s->fsync_width = value; - - regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, - I2S_CTRL_FSYNC_WIDTH_MASK, - i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT); - - } else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) { - i2s->stereo_to_mono[I2S_TX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) { - i2s->mono_to_stereo[I2S_TX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) { - i2s->stereo_to_mono[I2S_RX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) { - i2s->mono_to_stereo[I2S_RX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) { - i2s->rx_fifo_th = value; - } else if (strstr(kcontrol->id.name, "BCLK Ratio")) { - i2s->bclk_ratio = value; - } + /* + * Frame sync width is used only for FSYNC modes and not + * applicable for LRCK modes. Reset value for this field is "0", + * which means the width is one bit clock wide. + * The width requirement may depend on the codec and in such + * cases mixer control is used to update custom values. A value + * of "N" here means, width is "N + 1" bit clock wide. + */ + regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, + I2S_CTRL_FSYNC_WIDTH_MASK, + i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT); + + return 1; +} + +static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH]; + + return 0; +} + +static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == i2s->stereo_to_mono[I2S_TX_PATH]) + return 0; + + i2s->stereo_to_mono[I2S_TX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH]; + + return 0; +} + +static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == i2s->mono_to_stereo[I2S_TX_PATH]) + return 0; + + i2s->mono_to_stereo[I2S_TX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH]; + + return 0; +} + +static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value 
= ucontrol->value.enumerated.item[0]; + + if (value == i2s->stereo_to_mono[I2S_RX_PATH]) + return 0; + + i2s->stereo_to_mono[I2S_RX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH]; + + return 0; +} + +static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == i2s->mono_to_stereo[I2S_RX_PATH]) + return 0; + + i2s->mono_to_stereo[I2S_RX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.integer.value[0] = i2s->rx_fifo_th; + + return 0; +} + +static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + int value = ucontrol->value.integer.value[0]; + + if (value == i2s->rx_fifo_th) + return 0; + + i2s->rx_fifo_th = value; + + return 1; +} + +static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.integer.value[0] = i2s->bclk_ratio; + + return 0; +} + +static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + int value = ucontrol->value.integer.value[0]; + + if (value == i2s->bclk_ratio) + return 0; + + i2s->bclk_ratio = value; + + return 1; +} + +static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai, + unsigned int ratio) +{ + struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai); + + i2s->bclk_ratio = ratio; return 0; } @@ -598,22 +748,28 @@ static const struct soc_enum tegra210_i2s_stereo_conv_enum = tegra210_i2s_stereo_conv_text); static const struct snd_kcontrol_new tegra210_i2s_controls[] = { - SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_control, - tegra210_i2s_put_control), - SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, tegra210_i2s_get_control, - tegra210_i2s_put_control), + SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback, + tegra210_i2s_put_loopback), + SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, + tegra210_i2s_get_fsync_width, + tegra210_i2s_put_fsync_width), SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + tegra210_i2s_cget_stereo_to_mono, + tegra210_i2s_cput_stereo_to_mono), SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + 
tegra210_i2s_cget_mono_to_stereo, + tegra210_i2s_cput_mono_to_stereo), SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + tegra210_i2s_pget_stereo_to_mono, + tegra210_i2s_pput_stereo_to_mono), SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + tegra210_i2s_pget_mono_to_stereo, + tegra210_i2s_pput_mono_to_stereo), SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1, - 0, tegra210_i2s_get_control, tegra210_i2s_put_control), - SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, tegra210_i2s_get_control, - tegra210_i2s_put_control), + 0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th), + SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, + tegra210_i2s_get_bclk_ratio, + tegra210_i2s_put_bclk_ratio), }; static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = { diff --git a/sound/soc/tegra/tegra210_mixer.c b/sound/soc/tegra/tegra210_mixer.c index 55e61776c565..51d375573cfa 100644 --- a/sound/soc/tegra/tegra210_mixer.c +++ b/sound/soc/tegra/tegra210_mixer.c @@ -192,24 +192,24 @@ static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol, return 0; } -static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_mixer_apply_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol, + bool instant_gain) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); unsigned int reg = mc->reg, id; - bool instant_gain = false; int err; - if (strstr(kcontrol->id.name, "Instant Gain Volume")) - instant_gain = true; - /* Save gain value for specific MIXER input */ id = (reg - TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_0) / TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_STRIDE; + if (mixer->gain_value[id] == ucontrol->value.integer.value[0]) + return 0; + mixer->gain_value[id] = ucontrol->value.integer.value[0]; err = tegra210_mixer_configure_gain(cmpnt, id, instant_gain); @@ -221,6 +221,18 @@ static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol, return 1; } +static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return tegra210_mixer_apply_gain(kcontrol, ucontrol, false); +} + +static int tegra210_mixer_put_instant_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return tegra210_mixer_apply_gain(kcontrol, ucontrol, true); +} + static int tegra210_mixer_set_audio_cif(struct tegra210_mixer *mixer, struct snd_pcm_hw_params *params, unsigned int reg, @@ -388,7 +400,7 @@ ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG); SOC_SINGLE_EXT("RX" #id " Instant Gain Volume", \ MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0, \ 0x20000, 0, tegra210_mixer_get_gain, \ - tegra210_mixer_put_gain), + tegra210_mixer_put_instant_gain), /* Volume controls for all MIXER inputs */ static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = { diff --git a/sound/soc/tegra/tegra210_mvc.c b/sound/soc/tegra/tegra210_mvc.c index 7b9c7006e419..85b155887ec2 100644 --- a/sound/soc/tegra/tegra210_mvc.c +++ b/sound/soc/tegra/tegra210_mvc.c @@ -136,7 +136,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol, struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mvc *mvc =
snd_soc_component_get_drvdata(cmpnt); unsigned int value; - u8 mute_mask; + u8 new_mask, old_mask; int err; pm_runtime_get_sync(cmpnt->dev); @@ -148,11 +148,19 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol, if (err < 0) goto end; - mute_mask = ucontrol->value.integer.value[0]; + regmap_read(mvc->regmap, TEGRA210_MVC_CTRL, &value); + + old_mask = (value >> TEGRA210_MVC_MUTE_SHIFT) & TEGRA210_MUTE_MASK_EN; + new_mask = ucontrol->value.integer.value[0]; + + if (new_mask == old_mask) { + err = 0; + goto end; + } err = regmap_update_bits(mvc->regmap, mc->reg, TEGRA210_MVC_MUTE_MASK, - mute_mask << TEGRA210_MVC_MUTE_SHIFT); + new_mask << TEGRA210_MVC_MUTE_SHIFT); if (err < 0) goto end; @@ -195,7 +203,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol, unsigned int reg = mc->reg; unsigned int value; u8 chan; - int err; + int err, old_volume; pm_runtime_get_sync(cmpnt->dev); @@ -207,10 +215,16 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol, goto end; chan = (reg - TEGRA210_MVC_TARGET_VOL) / REG_SIZE; + old_volume = mvc->volume[chan]; tegra210_mvc_conv_vol(mvc, chan, ucontrol->value.integer.value[0]); + if (mvc->volume[chan] == old_volume) { + err = 0; + goto end; + } + /* Configure init volume same as target volume */ regmap_write(mvc->regmap, TEGRA210_MVC_REG_OFFSET(TEGRA210_MVC_INIT_VOL, chan), @@ -275,7 +289,7 @@ static int tegra210_mvc_get_curve_type(struct snd_kcontrol *kcontrol, struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt); - ucontrol->value.integer.value[0] = mvc->curve_type; + ucontrol->value.enumerated.item[0] = mvc->curve_type; return 0; } @@ -285,7 +299,7 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol, { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt); - int value; + unsigned int value; regmap_read(mvc->regmap, TEGRA210_MVC_ENABLE, &value); if (value & TEGRA210_MVC_EN) { @@ -294,10 +308,10 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol, return -EINVAL; } - if (mvc->curve_type == ucontrol->value.integer.value[0]) + if (mvc->curve_type == ucontrol->value.enumerated.item[0]) return 0; - mvc->curve_type = ucontrol->value.integer.value[0]; + mvc->curve_type = ucontrol->value.enumerated.item[0]; tegra210_mvc_reset_vol_settings(mvc, cmpnt->dev); diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c index dc477ee1b82c..7a2227ed3df6 100644 --- a/sound/soc/tegra/tegra210_sfc.c +++ b/sound/soc/tegra/tegra210_sfc.c @@ -3244,46 +3244,107 @@ static int tegra210_sfc_init(struct snd_soc_dapm_widget *w, return tegra210_sfc_write_coeff_ram(cmpnt); } -static int tegra210_sfc_get_control(struct snd_kcontrol *kcontrol, +static int tegra210_sfc_iget_stereo_to_mono(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); - if (strstr(kcontrol->id.name, "Input Stereo To Mono")) - ucontrol->value.integer.value[0] = - sfc->stereo_to_mono[SFC_RX_PATH]; - else if (strstr(kcontrol->id.name, "Input Mono To Stereo")) - ucontrol->value.integer.value[0] = - sfc->mono_to_stereo[SFC_RX_PATH]; - else if (strstr(kcontrol->id.name, "Output Stereo To Mono")) - ucontrol->value.integer.value[0] = - sfc->stereo_to_mono[SFC_TX_PATH]; - else if (strstr(kcontrol->id.name, "Output 
Mono To Stereo")) - ucontrol->value.integer.value[0] = - sfc->mono_to_stereo[SFC_TX_PATH]; + ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_RX_PATH]; return 0; } -static int tegra210_sfc_put_control(struct snd_kcontrol *kcontrol, +static int tegra210_sfc_iput_stereo_to_mono(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); - int value = ucontrol->value.integer.value[0]; - - if (strstr(kcontrol->id.name, "Input Stereo To Mono")) - sfc->stereo_to_mono[SFC_RX_PATH] = value; - else if (strstr(kcontrol->id.name, "Input Mono To Stereo")) - sfc->mono_to_stereo[SFC_RX_PATH] = value; - else if (strstr(kcontrol->id.name, "Output Stereo To Mono")) - sfc->stereo_to_mono[SFC_TX_PATH] = value; - else if (strstr(kcontrol->id.name, "Output Mono To Stereo")) - sfc->mono_to_stereo[SFC_TX_PATH] = value; - else + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->stereo_to_mono[SFC_RX_PATH]) + return 0; + + sfc->stereo_to_mono[SFC_RX_PATH] = value; + + return 1; +} + +static int tegra210_sfc_iget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_RX_PATH]; + + return 0; +} + +static int tegra210_sfc_iput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->mono_to_stereo[SFC_RX_PATH]) return 0; + sfc->mono_to_stereo[SFC_RX_PATH] = value; + + return 1; +} + +static int tegra210_sfc_oget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_TX_PATH]; + + return 0; +} + +static int tegra210_sfc_oput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->stereo_to_mono[SFC_TX_PATH]) + return 0; + + sfc->stereo_to_mono[SFC_TX_PATH] = value; + + return 1; +} + +static int tegra210_sfc_oget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_TX_PATH]; + + return 0; +} + +static int tegra210_sfc_oput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->mono_to_stereo[SFC_TX_PATH]) + return 0; + + sfc->mono_to_stereo[SFC_TX_PATH] = value; + return 1; } @@ 
-3384,13 +3445,17 @@ static const struct soc_enum tegra210_sfc_mono_conv_enum = static const struct snd_kcontrol_new tegra210_sfc_controls[] = { SOC_ENUM_EXT("Input Stereo To Mono", tegra210_sfc_stereo_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_iget_stereo_to_mono, + tegra210_sfc_iput_stereo_to_mono), SOC_ENUM_EXT("Input Mono To Stereo", tegra210_sfc_mono_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_iget_mono_to_stereo, + tegra210_sfc_iput_mono_to_stereo), SOC_ENUM_EXT("Output Stereo To Mono", tegra210_sfc_stereo_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_oget_stereo_to_mono, + tegra210_sfc_oput_stereo_to_mono), SOC_ENUM_EXT("Output Mono To Stereo", tegra210_sfc_mono_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_oget_mono_to_stereo, + tegra210_sfc_oput_mono_to_stereo), }; static const struct snd_soc_component_driver tegra210_sfc_cmpnt = { diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index 692e1b947490..ac8487dcff1d 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -24,7 +24,7 @@ man: man8 man8: $(DOC_MAN8) RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null) -RST2MAN_OPTS += --verbose +RST2MAN_OPTS += --verbose --strip-comments list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST)))) see_also = $(subst " ",, \ diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index 4425d942dd39..342716f74ec4 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-btf ================ @@ -7,13 +9,14 @@ tool for inspection of BTF data :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **btf** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | {**-d** | **--debug** } | - { **-B** | **--base-btf** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-B** | **--base-btf** } } *COMMANDS* := { **dump** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 8069d37dd991..a17e9aa314fd 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-cgroup ================ @@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF progs :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **cgroup** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } } *COMMANDS* := { **show** | **list** | **tree** | **attach** | **detach** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index ab9f57ee4c3a..4ce9a77bc1e0 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -1,3 +1,5 @@ +.. 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + =============== bpftool-feature =============== @@ -7,12 +9,14 @@ tool for inspection of eBPF-related parameters for Linux kernel or net device :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **feature** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **probe** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst index 2a137f8a4cea..bc276388f432 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-gen ================ @@ -7,13 +9,14 @@ tool for BPF code-generation :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **gen** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-L** | **--use-loader** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } } *COMMAND* := { **object** | **skeleton** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst index 471f363a725a..84839d488621 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-iter.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ============ bpftool-iter ============ @@ -7,12 +9,14 @@ tool to create BPF iterators :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **iter** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **pin** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst index 9434349636a5..52a4eee4af54 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-link.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-link ================ @@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF links :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **link** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } | { **-n** | **--nomount** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } *COMMANDS* := { **show** | **list** | **pin** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 991d18fd84f2..7c188a598444 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-map ================ @@ -7,13 +9,14 @@ tool for inspection and simple manipulation of eBPF maps :Manual section: 8 +.. 
include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **map** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } | { **-n** | **--nomount** } } + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } } *COMMANDS* := { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index 7ec57535a7c1..f4e0a516335a 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-net ================ @@ -7,12 +9,14 @@ tool for inspection of netdev/tc related bpf prog attachments :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **net** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst index ce52798a917d..5fea633a82f1 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-perf ================ @@ -7,12 +9,14 @@ tool for inspection of perf related bpf prog attachments :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **perf** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **show** | **list** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index f27265bd589b..a2e9359e554c 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ bpftool-prog ================ @@ -7,12 +9,14 @@ tool for inspection and simple manipulation of eBPF progs :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **prog** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | + *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } | { **-L** | **--use-loader** } } diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst index 02afc0fc14cb..ee53a122c0c7 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================== bpftool-struct_ops ================== @@ -7,12 +9,14 @@ tool to register/unregister/introspect BPF struct_ops :Manual section: 8 +.. 
include:: substitutions.rst + SYNOPSIS ======== **bpftool** [*OPTIONS*] **struct_ops** *COMMAND* - *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { |COMMON_OPTIONS| } *COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index 8ac86565c501..7084dd9fa2f8 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + ================ BPFTOOL ================ @@ -7,6 +9,8 @@ tool for inspection and simple manipulation of eBPF programs and maps :Manual section: 8 +.. include:: substitutions.rst + SYNOPSIS ======== @@ -18,8 +22,7 @@ SYNOPSIS *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** } - *OPTIONS* := { { **-V** | **--version** } | - { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + *OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| } *MAP-COMMANDS* := { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst index 75adf23202d8..908487b9c2ad 100644 --- a/tools/bpf/bpftool/Documentation/common_options.rst +++ b/tools/bpf/bpftool/Documentation/common_options.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + -h, --help Print short help message (similar to **bpftool help**). diff --git a/tools/bpf/bpftool/Documentation/substitutions.rst b/tools/bpf/bpftool/Documentation/substitutions.rst new file mode 100644 index 000000000000..ccf1ffa0686c --- /dev/null +++ b/tools/bpf/bpftool/Documentation/substitutions.rst @@ -0,0 +1,3 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +.. 
|COMMON_OPTIONS| replace:: { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | { **-l** | **--legacy** } diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 997a2865e04a..b4695df2ea3d 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -486,7 +486,6 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name) static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard) { - struct bpf_object_load_attr load_attr = {}; DECLARE_LIBBPF_OPTS(gen_loader_opts, opts); struct bpf_map *map; char ident[256]; @@ -496,12 +495,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h if (err) return err; - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - err = bpf_object__load_xattr(&load_attr); + err = bpf_object__load(obj); if (err) { p_err("failed to load object file"); goto out; @@ -719,6 +713,9 @@ static int do_skeleton(int argc, char **argv) if (obj_name[0] == '\0') get_obj_name(obj_name, file); opts.object_name = obj_name; + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + opts.kernel_log_level = 1 + 2 + 4; obj = bpf_object__open_mem(obj_data, file_sz, &opts); err = libbpf_get_error(obj); if (err) { diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 473791e87f7d..8b71500e7cb2 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -93,6 +93,7 @@ static int do_version(int argc, char **argv) jsonw_name(json_wtr, "features"); jsonw_start_object(json_wtr); /* features */ jsonw_bool_field(json_wtr, "libbfd", has_libbfd); + jsonw_bool_field(json_wtr, "libbpf_strict", !legacy_libbpf); jsonw_bool_field(json_wtr, "skeletons", has_skeletons); jsonw_end_object(json_wtr); /* features */ @@ -106,6 +107,10 @@ static int do_version(int argc, char **argv) printf(" libbfd"); nb_features++; } + if (!legacy_libbpf) { + printf("%s libbpf_strict", nb_features++ ? "," : ""); + nb_features++; + } if (has_skeletons) printf("%s skeletons", nb_features++ ? 
"," : ""); printf("\n"); @@ -400,6 +405,7 @@ int main(int argc, char **argv) { "legacy", no_argument, NULL, 'l' }, { 0 } }; + bool version_requested = false; int opt, ret; last_do_help = do_help; @@ -414,7 +420,8 @@ int main(int argc, char **argv) options, NULL)) >= 0) { switch (opt) { case 'V': - return do_version(argc, argv); + version_requested = true; + break; case 'h': return do_help(argc, argv); case 'p': @@ -479,6 +486,9 @@ int main(int argc, char **argv) if (argc < 0) usage(); + if (version_requested) + return do_version(argc, argv); + ret = cmd_select(cmds, argc, argv, do_help); if (json_output) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 25b258804f11..cc530a229812 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -1261,7 +1261,10 @@ static int do_pin(int argc, char **argv) static int do_create(int argc, char **argv) { - struct bpf_create_map_attr attr = { NULL, }; + LIBBPF_OPTS(bpf_map_create_opts, attr); + enum bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC; + __u32 key_size = 0, value_size = 0, max_entries = 0; + const char *map_name = NULL; const char *pinfile; int err = -1, fd; @@ -1276,30 +1279,30 @@ static int do_create(int argc, char **argv) if (is_prefix(*argv, "type")) { NEXT_ARG(); - if (attr.map_type) { + if (map_type) { p_err("map type already specified"); goto exit; } - attr.map_type = map_type_from_str(*argv); - if ((int)attr.map_type < 0) { + map_type = map_type_from_str(*argv); + if ((int)map_type < 0) { p_err("unrecognized map type: %s", *argv); goto exit; } NEXT_ARG(); } else if (is_prefix(*argv, "name")) { NEXT_ARG(); - attr.name = GET_ARG(); + map_name = GET_ARG(); } else if (is_prefix(*argv, "key")) { - if (parse_u32_arg(&argc, &argv, &attr.key_size, + if (parse_u32_arg(&argc, &argv, &key_size, "key size")) goto exit; } else if (is_prefix(*argv, "value")) { - if (parse_u32_arg(&argc, &argv, &attr.value_size, + if (parse_u32_arg(&argc, &argv, &value_size, "value size")) goto exit; } else if (is_prefix(*argv, "entries")) { - if (parse_u32_arg(&argc, &argv, &attr.max_entries, + if (parse_u32_arg(&argc, &argv, &max_entries, "max entries")) goto exit; } else if (is_prefix(*argv, "flags")) { @@ -1340,14 +1343,14 @@ static int do_create(int argc, char **argv) } } - if (!attr.name) { + if (!map_name) { p_err("map name not specified"); goto exit; } set_max_rlimit(); - fd = bpf_create_map_xattr(&attr); + fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr); if (fd < 0) { p_err("map create failed: %s", strerror(errno)); goto exit; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index e47e8b06cc3d..f874896c4154 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1464,7 +1464,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, .relaxed_maps = relaxed_maps, ); - struct bpf_object_load_attr load_attr = { 0 }; enum bpf_attach_type expected_attach_type; struct map_replace *map_replace = NULL; struct bpf_program *prog = NULL, *pos; @@ -1598,6 +1597,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) set_max_rlimit(); + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + open_opts.kernel_log_level = 1 + 2 + 4; + obj = bpf_object__open_file(file, &open_opts); if (libbpf_get_error(obj)) { p_err("failed to open object file"); @@ -1677,12 +1680,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) goto 
err_close_obj; } - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - err = bpf_object__load_xattr(&load_attr); + err = bpf_object__load(obj); if (err) { p_err("failed to load object file"); goto err_close_obj; @@ -1774,17 +1772,19 @@ static int try_loader(struct gen_loader_opts *gen) sizeof(struct bpf_prog_desc)); int log_buf_sz = (1u << 24) - 1; int err, fds_before, fd_delta; - char *log_buf; + char *log_buf = NULL; ctx = alloca(ctx_sz); memset(ctx, 0, ctx_sz); ctx->sz = ctx_sz; - ctx->log_level = 1; - ctx->log_size = log_buf_sz; - log_buf = malloc(log_buf_sz); - if (!log_buf) - return -ENOMEM; - ctx->log_buf = (long) log_buf; + if (verifier_logs) { + ctx->log_level = 1 + 2 + 4; + ctx->log_size = log_buf_sz; + log_buf = malloc(log_buf_sz); + if (!log_buf) + return -ENOMEM; + ctx->log_buf = (long) log_buf; + } opts.ctx = ctx; opts.data = gen->data; opts.data_sz = gen->data_sz; @@ -1793,9 +1793,9 @@ static int try_loader(struct gen_loader_opts *gen) fds_before = count_open_fds(); err = bpf_load_and_run(&opts); fd_delta = count_open_fds() - fds_before; - if (err < 0) { + if (err < 0 || verifier_logs) { fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf); - if (fd_delta) + if (fd_delta && err < 0) fprintf(stderr, "loader prog leaked %d FDs\n", fd_delta); } @@ -1807,7 +1807,6 @@ static int do_loader(int argc, char **argv) { DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); DECLARE_LIBBPF_OPTS(gen_loader_opts, gen); - struct bpf_object_load_attr load_attr = {}; struct bpf_object *obj; const char *file; int err = 0; @@ -1816,6 +1815,10 @@ static int do_loader(int argc, char **argv) return -1; file = GET_ARG(); + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + open_opts.kernel_log_level = 1 + 2 + 4; + obj = bpf_object__open_file(file, &open_opts); if (libbpf_get_error(obj)) { p_err("failed to open object file"); @@ -1826,12 +1829,7 @@ static int do_loader(int argc, char **argv) if (err) goto err_close_obj; - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - err = bpf_object__load_xattr(&load_attr); + err = bpf_object__load(obj); if (err) { p_err("failed to load object file"); goto err_close_obj; diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c index cbdca37a53f0..2f693b082bdb 100644 --- a/tools/bpf/bpftool/struct_ops.c +++ b/tools/bpf/bpftool/struct_ops.c @@ -479,7 +479,7 @@ static int do_unregister(int argc, char **argv) static int do_register(int argc, char **argv) { - struct bpf_object_load_attr load_attr = {}; + LIBBPF_OPTS(bpf_object_open_opts, open_opts); const struct bpf_map_def *def; struct bpf_map_info info = {}; __u32 info_len = sizeof(info); @@ -494,18 +494,17 @@ static int do_register(int argc, char **argv) file = GET_ARG(); - obj = bpf_object__open(file); + if (verifier_logs) + /* log_level1 + log_level2 + stats, but not stable UAPI */ + open_opts.kernel_log_level = 1 + 2 + 4; + + obj = bpf_object__open_file(file, &open_opts); if (libbpf_get_error(obj)) return -1; set_max_rlimit(); - load_attr.obj = obj; - if (verifier_logs) - /* log_level1 + log_level2 + stats, but not stable UAPI */ - load_attr.log_level = 1 + 2 + 4; - - if (bpf_object__load_xattr(&load_attr)) { + if (bpf_object__load(obj)) { bpf_object__close(obj); return -1; } diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index 
a59cb0ee609c..5d26f3c6f918 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -83,6 +83,7 @@ struct btf_id { int cnt; }; int addr_cnt; + bool is_set; Elf64_Addr addr[ADDR_CNT]; }; @@ -167,7 +168,7 @@ static struct btf_id *btf_id__find(struct rb_root *root, const char *name) return NULL; } -static struct btf_id* +static struct btf_id * btf_id__add(struct rb_root *root, char *name, bool unique) { struct rb_node **p = &root->rb_node; @@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj) * in symbol's size, together with 'cnt' field hence * that - 1. */ - if (id) + if (id) { id->cnt = sym.st_size / sizeof(int) - 1; + id->is_set = true; + } } else { pr_err("FAILED unsupported prefix %s\n", prefix); return -1; @@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id) int *ptr = data->d_buf; int i; - if (!id->id) { + if (!id->id && !id->is_set) pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name); - } for (i = 0; i < id->addr_cnt; i++) { unsigned long addr = id->addr[i]; @@ -730,7 +732,8 @@ int main(int argc, const char **argv) if (obj.efile.idlist_shndx == -1 || obj.efile.symbols_shndx == -1) { pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n"); - return 0; + err = 0; + goto out; } if (symbols_collect(&obj)) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 45a9a59828c3..ae61f464043a 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC := \ numa_num_possible_cpus \ libperl \ libpython \ - libpython-version \ libslang \ libslang-include-subdir \ libtraceevent \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 0a3244ad9673..1480910c792e 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -32,7 +32,6 @@ FILES= \ test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ - test-libpython-version.bin \ test-libslang.bin \ test-libslang-include-subdir.bin \ test-libtraceevent.bin \ @@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin: $(OUTPUT)test-libpython.bin: $(BUILD) $(FLAGS_PYTHON_EMBED) -$(OUTPUT)test-libpython-version.bin: - $(BUILD) - $(OUTPUT)test-libbfd.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 0b243ce842be..5ffafb967b6e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -14,10 +14,6 @@ # include "test-libpython.c" #undef main -#define main main_test_libpython_version -# include "test-libpython-version.c" -#undef main - #define main main_test_libperl # include "test-libperl.c" #undef main @@ -177,7 +173,6 @@ int main(int argc, char *argv[]) { main_test_libpython(); - main_test_libpython_version(); main_test_libperl(); main_test_hello(); main_test_libelf(); diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c index 82070eadfc07..727d22e34a6e 100644 --- a/tools/build/feature/test-bpf.c +++ b/tools/build/feature/test-bpf.c @@ -14,6 +14,12 @@ # define __NR_bpf 349 # elif defined(__s390__) # define __NR_bpf 351 +# elif defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define __NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 # else # error __NR_bpf not defined. libbpf does not support your arch. 
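/* MIPS syscall numbers encode the ABI: the o32, n32 and n64 tables are
 * based at 4000, 6000 and 5000 respectively, so the three values added
 * above are each that base plus the per-ABI slot of the bpf(2) call.
 */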
# endif diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c deleted file mode 100644 index 47714b942d4d..000000000000 --- a/tools/build/feature/test-libpython-version.c +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <Python.h> - -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif - -int main(void) -{ - return 0; -} diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h deleted file mode 100644 index 72d595ce764a..000000000000 --- a/tools/include/linux/debug_locks.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_ -#define _LIBLOCKDEP_DEBUG_LOCKS_H_ - -#include <stddef.h> -#include <linux/compiler.h> -#include <asm/bug.h> - -#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x) - -extern bool debug_locks; -extern bool debug_locks_silent; - -#endif diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h deleted file mode 100644 index b25580b6a9be..000000000000 --- a/tools/include/linux/hardirq.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_ -#define _LIBLOCKDEP_LINUX_HARDIRQ_H_ - -#define SOFTIRQ_BITS 0UL -#define HARDIRQ_BITS 0UL -#define SOFTIRQ_SHIFT 0UL -#define HARDIRQ_SHIFT 0UL -#define hardirq_count() 0UL -#define softirq_count() 0UL - -#endif diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h deleted file mode 100644 index 501262aee8ff..000000000000 --- a/tools/include/linux/irqflags.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ -#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ - -# define lockdep_hardirq_context() 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled() 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS - -# define stop_critical_timings() do { } while (0) -# define start_critical_timings() do { } while (0) - -#define raw_local_irq_disable() do { } while (0) -#define raw_local_irq_enable() do { } while (0) -#define raw_local_irq_save(flags) ((flags) = 0) -#define raw_local_irq_restore(flags) ((void)(flags)) -#define raw_local_save_flags(flags) ((flags) = 0) -#define raw_irqs_disabled_flags(flags) ((void)(flags)) -#define raw_irqs_disabled() 0 -#define raw_safe_halt() - -#define local_irq_enable() do { } while (0) -#define local_irq_disable() do { } while (0) -#define local_irq_save(flags) ((flags) = 0) -#define local_irq_restore(flags) ((void)(flags)) -#define local_save_flags(flags) ((flags) = 0) -#define irqs_disabled() (1) -#define irqs_disabled_flags(flags) ((void)(flags), 0) -#define safe_halt() do { } while (0) - -#define trace_lock_release(x, y) -#define trace_lock_acquire(a, b, c, d, e, f, g) - -#endif diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index a7e54a08fb54..3e8df500cfbd 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h @@ -7,6 +7,7 @@ #include <assert.h> #include <linux/build_bug.h> #include <linux/compiler.h> +#include <linux/math.h> #include <endian.h> #include <byteswap.h> @@ -14,8 +15,6 @@ #define UINT_MAX (~0U) #endif -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) - #define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, 
(typeof(x))(a)-1) #define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) @@ -52,15 +51,6 @@ _min1 < _min2 ? _min1 : _min2; }) #endif -#ifndef roundup -#define roundup(x, y) ( \ -{ \ - const typeof(y) __y = y; \ - (((x) + (__y - 1)) / __y) * __y; \ -} \ -) -#endif - #ifndef BUG_ON #ifdef NDEBUG #define BUG_ON(cond) do { if (cond) {} } while (0) @@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...); #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. - */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - #define current_gfp_context(k) 0 #define synchronize_rcu() diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h deleted file mode 100644 index e56997288f2b..000000000000 --- a/tools/include/linux/lockdep.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LOCKDEP_H_ -#define _LIBLOCKDEP_LOCKDEP_H_ - -#include <sys/prctl.h> -#include <sys/syscall.h> -#include <string.h> -#include <limits.h> -#include <linux/utsname.h> -#include <linux/compiler.h> -#include <linux/export.h> -#include <linux/kern_levels.h> -#include <linux/err.h> -#include <linux/rcu.h> -#include <linux/list.h> -#include <linux/hardirq.h> -#include <unistd.h> - -#define MAX_LOCK_DEPTH 63UL - -#define asmlinkage -#define __visible - -#include "../../../include/linux/lockdep.h" - -struct task_struct { - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; - gfp_t lockdep_reclaim_gfp; - int pid; - int state; - char comm[17]; -}; - -#define TASK_RUNNING 0 - -extern struct task_struct *__curr(void); - -#define current (__curr()) - -static inline int debug_locks_off(void) -{ - return 1; -} - -#define task_pid_nr(tsk) ((tsk)->pid) - -#define KSYM_NAME_LEN 128 -#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) -#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) -#define pr_warn pr_err -#define pr_cont pr_err - -#define list_del_rcu list_del - -#define atomic_t unsigned long -#define atomic_inc(x) ((*(x))++) - -#define print_tainted() "" -#define static_obj(x) 1 - -#define debug_show_all_locks() -extern void debug_check_no_locks_held(void); - -static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr) -{ - return false; -} - -#endif diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h new file mode 100644 index 000000000000..4e7af99ec9eb --- /dev/null +++ b/tools/include/linux/math.h @@ -0,0 +1,25 @@ +#ifndef _TOOLS_MATH_H +#define _TOOLS_MATH_H + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. 
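+ *
+ * For example: round_up(10, 8) == 16 and round_down(10, 8) == 8 (both
+ * require y to be a power of two), while DIV_ROUND_UP(10, 8) == 2 and
+ * roundup(10, 6) == 12 work for any non-zero y.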
+ */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#ifndef roundup +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +#endif + +#endif diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h deleted file mode 100644 index 8b3b03b64fda..000000000000 --- a/tools/include/linux/proc_fs.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H -#define _TOOLS_INCLUDE_LINUX_PROC_FS_H - -#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */ diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index c934572d935c..622266b197d0 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h @@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex) return true; } -#include <linux/lockdep.h> - #endif diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h deleted file mode 100644 index ae343ac35bfa..000000000000 --- a/tools/include/linux/stacktrace.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_ -#define _LIBLOCKDEP_LINUX_STACKTRACE_H_ - -#include <execinfo.h> - -struct stack_trace { - unsigned int nr_entries, max_entries; - unsigned long *entries; - int skip; -}; - -static inline void print_stack_trace(struct stack_trace *trace, int spaces) -{ - backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); -} - -#define save_stack_trace(trace) \ - ((trace)->nr_entries = \ - backtrace((void **)(trace)->entries, (trace)->max_entries)) - -static inline int dump_stack(void) -{ - void *array[64]; - size_t size; - - size = backtrace(array, 64); - backtrace_symbols_fd(array, size, 1); - - return 0; -} - -#endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 6297eafdc40f..c26871263f1f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1342,8 +1342,10 @@ union bpf_attr { /* or valid module BTF object fd or 0 to attach to vmlinux */ __u32 attach_btf_obj_fd; }; - __u32 :32; /* pad */ + __u32 core_relo_cnt; /* number of bpf_core_relo */ __aligned_u64 fd_array; /* array of FDs */ + __aligned_u64 core_relos; + __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -1744,7 +1746,7 @@ union bpf_attr { * if the maximum number of tail calls has been reached for this * chain of programs. This limit is defined in the kernel by the * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), - * which is currently set to 32. + * which is currently set to 33. * Return * 0 on success, or a negative error in case of failure. * @@ -4957,6 +4959,30 @@ union bpf_attr { * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. * **-EBUSY** if failed to try lock mmap_lock. * **-EINVAL** for invalid **flags**. + * + * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags) + * Description + * For **nr_loops**, call **callback_fn** function + * with **callback_ctx** as the context parameter. + * The **callback_fn** should be a static function and + * the **callback_ctx** should be a pointer to the stack. + * The **flags** is used to control certain aspects of the helper. + * Currently, the **flags** must be 0. 
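+ *
+ *		For example (an illustrative sketch; the callback name
+ *		is hypothetical), a callback that sums the loop indices:
+ *
+ *			static long sum_cb(u32 index, void \*ctx)
+ *			{
+ *				\*(u64 \*)ctx += index;
+ *				return 0;
+ *			}
+ *
+ *		Invoked as bpf_loop(100, sum_cb, &sum, 0) with
+ *		u64 sum = 0 on the BPF program's stack, this leaves
+ *		sum == 4950 and the helper returns 100.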
Currently, nr_loops is + limited to 1 << 23 (~8 million) loops. + + * long (\*callback_fn)(u32 index, void \*ctx); + + * where **index** is the current index in the loop. The index + * is zero-indexed. + + * If **callback_fn** returns 0, the helper will continue to the next + * loop. If the return value is 1, the helper will skip the rest of + * the loops and return. Other return values are not used now, + * and will be rejected by the verifier. + + * Return + * The number of loops performed, **-EINVAL** for invalid **flags**, + * **-E2BIG** if **nr_loops** exceeds the maximum number of loops. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5140,6 +5166,7 @@ union bpf_attr { FN(skc_to_unix_sock), \ FN(kallsyms_lookup_name), \ FN(find_vma), \ + FN(loop), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -6349,4 +6376,78 @@ enum { BTF_F_ZERO = (1ULL << 3), }; +/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value + * has to be adjusted by relocations. It is emitted by llvm and passed to + * libbpf and later to the kernel. + */ +enum bpf_core_relo_kind { + BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */ + BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ + BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ + BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ + BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ + BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */ + BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */ + BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ + BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ + BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ +}; + +/* + * "struct bpf_core_relo" is used to pass relocation data from LLVM to libbpf + * and from libbpf to the kernel. + * + * CO-RE relocation captures the following data: + * - insn_off - instruction offset (in bytes) within a BPF program that needs + * its insn->imm field to be relocated with actual field info; + * - type_id - BTF type ID of the "root" (containing) entity of a relocatable + * type or field; + * - access_str_off - offset into corresponding .BTF string section. String + * interpretation depends on specific relocation kind: + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field. + * - for type-based relocations, string is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; + * - kind - one of enum bpf_core_relo_kind; + * + * Example: + * struct sample { + * int a; + * struct { + * int b[10]; + * }; + * }; + * + * struct sample *s = ...; + * int *x = &s->a; // encoded as "0:0" (a is field #0) + * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, + * // b is field #0 inside anon struct, accessing elem #5) + * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) + * + * type_id for all relocs in this example will capture BTF type id of + * `struct sample`.
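+ *
+ * (As a concrete reading of the first access above: the loader sees a
+ * relocation of kind BPF_CORE_FIELD_BYTE_OFFSET with access string
+ * "0:0" and patches the instruction's imm field with the byte offset
+ * of field 'a' within 'struct sample' in the target kernel's BTF.)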
+ * + * Such relocation is emitted when using __builtin_preserve_access_index() + * Clang built-in, passing expression that captures field address, e.g.: + * + * bpf_probe_read(&dst, sizeof(dst), + * __builtin_preserve_access_index(&src->a.b.c)); + * + * In this case Clang will emit field relocation recording necessary data to + * be able to find offset of embedded `a.b.c` field within `src` struct. + * + * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction + */ +struct bpf_core_relo { + __u32 insn_off; + __u32 type_id; + __u32 access_str_off; + enum bpf_core_relo_kind kind; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 94560ba31724..6b2407e12060 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -50,6 +50,12 @@ # define __NR_bpf 351 # elif defined(__arc__) # define __NR_bpf 280 +# elif defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define __NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 # else # error __NR_bpf not defined. libbpf does not support your arch. # endif @@ -88,146 +94,122 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int return fd; } -int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr) +int bpf_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts) { + const size_t attr_sz = offsetofend(union bpf_attr, map_extra); union bpf_attr attr; int fd; - memset(&attr, '\0', sizeof(attr)); - - attr.map_type = create_attr->map_type; - attr.key_size = create_attr->key_size; - attr.value_size = create_attr->value_size; - attr.max_entries = create_attr->max_entries; - attr.map_flags = create_attr->map_flags; - if (create_attr->name) - memcpy(attr.map_name, create_attr->name, - min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1)); - attr.numa_node = create_attr->numa_node; - attr.btf_fd = create_attr->btf_fd; - attr.btf_key_type_id = create_attr->btf_key_type_id; - attr.btf_value_type_id = create_attr->btf_value_type_id; - attr.map_ifindex = create_attr->map_ifindex; - if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS) - attr.btf_vmlinux_value_type_id = - create_attr->btf_vmlinux_value_type_id; - else - attr.inner_map_fd = create_attr->inner_map_fd; - attr.map_extra = create_attr->map_extra; + memset(&attr, 0, attr_sz); + + if (!OPTS_VALID(opts, bpf_map_create_opts)) + return libbpf_err(-EINVAL); + + attr.map_type = map_type; + if (map_name) + strncat(attr.map_name, map_name, sizeof(attr.map_name) - 1); + attr.key_size = key_size; + attr.value_size = value_size; + attr.max_entries = max_entries; + + attr.btf_fd = OPTS_GET(opts, btf_fd, 0); + attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); + attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); + attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); - fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr)); + attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); + attr.map_flags = OPTS_GET(opts, map_flags, 0); + attr.map_extra = OPTS_GET(opts, map_extra, 0); + attr.numa_node = OPTS_GET(opts, numa_node, 0); + attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0); + + fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz); return libbpf_err_errno(fd); } int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) { - struct bpf_create_map_params p = {}; + 
LIBBPF_OPTS(bpf_map_create_opts, p); - p.map_type = create_attr->map_type; - p.key_size = create_attr->key_size; - p.value_size = create_attr->value_size; - p.max_entries = create_attr->max_entries; p.map_flags = create_attr->map_flags; - p.name = create_attr->name; p.numa_node = create_attr->numa_node; p.btf_fd = create_attr->btf_fd; p.btf_key_type_id = create_attr->btf_key_type_id; p.btf_value_type_id = create_attr->btf_value_type_id; p.map_ifindex = create_attr->map_ifindex; - if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS) - p.btf_vmlinux_value_type_id = - create_attr->btf_vmlinux_value_type_id; + if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS) + p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id; else p.inner_map_fd = create_attr->inner_map_fd; - return libbpf__bpf_create_map_xattr(&p); + return bpf_map_create(create_attr->map_type, create_attr->name, + create_attr->key_size, create_attr->value_size, + create_attr->max_entries, &p); } int bpf_create_map_node(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags, int node) { - struct bpf_create_map_attr map_attr = {}; - - map_attr.name = name; - map_attr.map_type = map_type; - map_attr.map_flags = map_flags; - map_attr.key_size = key_size; - map_attr.value_size = value_size; - map_attr.max_entries = max_entries; + LIBBPF_OPTS(bpf_map_create_opts, opts); + + opts.map_flags = map_flags; if (node >= 0) { - map_attr.numa_node = node; - map_attr.map_flags |= BPF_F_NUMA_NODE; + opts.numa_node = node; + opts.map_flags |= BPF_F_NUMA_NODE; } - return bpf_create_map_xattr(&map_attr); + return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts); } int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags) { - struct bpf_create_map_attr map_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); - map_attr.map_type = map_type; - map_attr.map_flags = map_flags; - map_attr.key_size = key_size; - map_attr.value_size = value_size; - map_attr.max_entries = max_entries; - - return bpf_create_map_xattr(&map_attr); + return bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts); } int bpf_create_map_name(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags) { - struct bpf_create_map_attr map_attr = {}; - - map_attr.name = name; - map_attr.map_type = map_type; - map_attr.map_flags = map_flags; - map_attr.key_size = key_size; - map_attr.value_size = value_size; - map_attr.max_entries = max_entries; + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); - return bpf_create_map_xattr(&map_attr); + return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts); } int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node) { - union bpf_attr attr; - int fd; - - memset(&attr, '\0', sizeof(attr)); - - attr.map_type = map_type; - attr.key_size = key_size; - attr.value_size = 4; - attr.inner_map_fd = inner_map_fd; - attr.max_entries = max_entries; - attr.map_flags = map_flags; - if (name) - memcpy(attr.map_name, name, - min(strlen(name), BPF_OBJ_NAME_LEN - 1)); + LIBBPF_OPTS(bpf_map_create_opts, opts); + opts.inner_map_fd = inner_map_fd; + opts.map_flags = map_flags; if (node >= 0) { - attr.map_flags |= BPF_F_NUMA_NODE; - attr.numa_node = node; + opts.map_flags |= BPF_F_NUMA_NODE; + opts.numa_node = node; } - 
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr)); - return libbpf_err_errno(fd); + return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts); } int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags) { - return bpf_create_map_in_map_node(map_type, name, key_size, - inner_map_fd, max_entries, map_flags, - -1); + LIBBPF_OPTS(bpf_map_create_opts, opts, + .inner_map_fd = inner_map_fd, + .map_flags = map_flags, + ); + + return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts); } static void * @@ -321,10 +303,6 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, if (log_level && !log_buf) return libbpf_err(-EINVAL); - attr.log_level = log_level; - attr.log_buf = ptr_to_u64(log_buf); - attr.log_size = log_size; - func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0); func_info = OPTS_GET(opts, func_info, NULL); attr.func_info_rec_size = func_info_rec_size; @@ -339,6 +317,12 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); + if (log_level) { + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_size; + attr.log_level = log_level; + } + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); if (fd >= 0) return fd; @@ -384,16 +368,17 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, goto done; } - if (log_level || !log_buf) - goto done; + if (log_level == 0 && log_buf) { + /* log_level == 0 with non-NULL log_buf requires retrying on error + * with log_level == 1 and log_buf/log_buf_size set, to get details of + * failure + */ + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_size; + attr.log_level = 1; - /* Try again with log */ - log_buf[0] = 0; - attr.log_buf = ptr_to_u64(log_buf); - attr.log_size = log_size; - attr.log_level = 1; - - fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); + } done: /* free() doesn't affect errno, so we don't need to restore it */ free(finfo); @@ -1062,24 +1047,65 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd) return libbpf_err_errno(fd); } -int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, - bool do_log) +int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts) { - union bpf_attr attr = {}; + const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level); + union bpf_attr attr; + char *log_buf; + size_t log_size; + __u32 log_level; int fd; - attr.btf = ptr_to_u64(btf); + memset(&attr, 0, attr_sz); + + if (!OPTS_VALID(opts, bpf_btf_load_opts)) + return libbpf_err(-EINVAL); + + log_buf = OPTS_GET(opts, log_buf, NULL); + log_size = OPTS_GET(opts, log_size, 0); + log_level = OPTS_GET(opts, log_level, 0); + + if (log_size > UINT_MAX) + return libbpf_err(-EINVAL); + if (log_size && !log_buf) + return libbpf_err(-EINVAL); + + attr.btf = ptr_to_u64(btf_data); attr.btf_size = btf_size; + /* log_level == 0 and log_buf != NULL means "try loading without + * log_buf, but retry with log_buf and log_level=1 on error", which is + * consistent across low-level and high-level BTF and program loading + * APIs within libbpf and provides a sensible behavior in practice + */ + if (log_level) { + attr.btf_log_buf = ptr_to_u64(log_buf); + attr.btf_log_size = (__u32)log_size; + attr.btf_log_level = log_level; + } -retry: - if (do_log && log_buf && log_buf_size) { - attr.btf_log_level = 1; - attr.btf_log_size = log_buf_size; + fd = 
sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); + if (fd < 0 && log_buf && log_level == 0) { attr.btf_log_buf = ptr_to_u64(log_buf); + attr.btf_log_size = (__u32)log_size; + attr.btf_log_level = 1; + fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); } + return libbpf_err_errno(fd); +} + +int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + int fd; - fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr)); +retry: + if (do_log && log_buf && log_buf_size) { + opts.log_buf = log_buf; + opts.log_size = log_buf_size; + opts.log_level = 1; + } + fd = bpf_btf_load(btf, btf_size, &opts); if (fd < 0 && !do_log && log_buf && log_buf_size) { do_log = true; goto retry; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 079cc81ac51e..94e553a0ff9d 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -35,6 +35,30 @@ extern "C" { #endif +struct bpf_map_create_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + + __u32 inner_map_fd; + __u32 map_flags; + __u64 map_extra; + + __u32 numa_node; + __u32 map_ifindex; +}; +#define bpf_map_create_opts__last_field map_ifindex + +LIBBPF_API int bpf_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts); + struct bpf_create_map_attr { const char *name; enum bpf_map_type map_type; @@ -53,20 +77,25 @@ struct bpf_create_map_attr { }; }; -LIBBPF_API int -bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags, int node); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name, int key_size, int value_size, int max_entries, __u32 map_flags); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, __u32 map_flags, int node); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, int key_size, int inner_map_fd, int max_entries, @@ -166,8 +195,9 @@ struct bpf_load_program_attr { /* Flags to direct loading requirements */ #define MAPS_RELAX_COMPAT 0x01 -/* Recommend log buffer size */ +/* Recommended log buffer size */ #define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */ + LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, char *log_buf, size_t log_buf_sz); @@ -184,6 +214,23 @@ LIBBPF_API int bpf_verify_program(enum bpf_prog_type type, char *log_buf, size_t log_buf_sz, int log_level); +struct bpf_btf_load_opts { + size_t sz; /* size of this struct for 
forward/backward compatibility */ + + /* kernel log options */ + char *log_buf; + __u32 log_level; + __u32 log_size; +}; +#define bpf_btf_load_opts__last_field log_size + +LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size, + const struct bpf_btf_load_opts *opts); + +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead") +LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, + __u32 log_buf_size, bool do_log); + LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); @@ -311,8 +358,6 @@ LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt); LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd); -LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, - __u32 log_buf_size, bool do_log); LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h index cc486a77db65..223308931d55 100644 --- a/tools/lib/bpf/bpf_gen_internal.h +++ b/tools/lib/bpf/bpf_gen_internal.h @@ -39,6 +39,8 @@ struct bpf_gen { int error; struct ksym_relo_desc *relos; int relo_cnt; + struct bpf_core_relo *core_relos; + int core_relo_cnt; char attach_target[128]; int attach_kind; struct ksym_desc *ksyms; @@ -51,7 +53,10 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps); void bpf_gen__free(struct bpf_gen *gen); void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); -void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx); +void bpf_gen__map_create(struct bpf_gen *gen, + enum bpf_map_type map_type, const char *map_name, + __u32 key_size, __u32 value_size, __u32 max_entries, + struct bpf_map_create_opts *map_attr, int map_idx); void bpf_gen__prog_load(struct bpf_gen *gen, enum bpf_prog_type prog_type, const char *prog_name, const char *license, struct bpf_insn *insns, size_t insn_cnt, @@ -61,5 +66,7 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, bool is_typeless, int kind, int insn_idx); +void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo); +void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx); #endif diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index fadf089ae8fe..9aa19c89f758 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -454,7 +454,7 @@ const struct btf *btf__base_btf(const struct btf *btf) } /* internal helper returning non-const pointer to a type */ -struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id) +struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id) { if (type_id == 0) return &btf_void; @@ -610,6 +610,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) case BTF_KIND_RESTRICT: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: type_id = t->type; break; case BTF_KIND_ARRAY: @@ -1123,54 +1124,86 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf) static void 
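/*
 * bpf_btf_load() follows the same OPTS pattern as bpf_map_create(). A usage
 * sketch, assuming the raw BTF blob comes from btf__raw_data() (buffer size
 * is illustrative):
 *
 *	__u32 raw_size;
 *	const void *raw_data = btf__raw_data(btf, &raw_size);
 *	char log[64 * 1024];
 *	LIBBPF_OPTS(bpf_btf_load_opts, opts,
 *		.log_buf = log,
 *		.log_size = sizeof(log));
 *	int fd = bpf_btf_load(raw_data, raw_size, &opts);
 *
 * With log_level left at 0, the first attempt is silent and the buffer is
 * only filled in by the automatic log_level=1 retry on failure, per the
 * comment in bpf_btf_load() above.
 */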
*btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); -int btf__load_into_kernel(struct btf *btf) +int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level) { - __u32 log_buf_size = 0, raw_size; - char *log_buf = NULL; + LIBBPF_OPTS(bpf_btf_load_opts, opts); + __u32 buf_sz = 0, raw_size; + char *buf = NULL, *tmp; void *raw_data; int err = 0; if (btf->fd >= 0) return libbpf_err(-EEXIST); + if (log_sz && !log_buf) + return libbpf_err(-EINVAL); -retry_load: - if (log_buf_size) { - log_buf = malloc(log_buf_size); - if (!log_buf) - return libbpf_err(-ENOMEM); - - *log_buf = 0; - } - + /* cache native raw data representation */ raw_data = btf_get_raw_data(btf, &raw_size, false); if (!raw_data) { err = -ENOMEM; goto done; } - /* cache native raw data representation */ btf->raw_size = raw_size; btf->raw_data = raw_data; - btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false); +retry_load: + /* if log_level is 0, we won't provide log_buf/log_size to the kernel, + * initially. Only if BTF loading fails, we bump log_level to 1 and + * retry, using either auto-allocated or custom log_buf. This way + * non-NULL custom log_buf provides a buffer just in case, but hopes + * for successful load and no need for log_buf. + */ + if (log_level) { + /* if caller didn't provide custom log_buf, we'll keep + * allocating our own progressively bigger buffers for BTF + * verification log + */ + if (!log_buf) { + buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2); + tmp = realloc(buf, buf_sz); + if (!tmp) { + err = -ENOMEM; + goto done; + } + buf = tmp; + buf[0] = '\0'; + } + + opts.log_buf = log_buf ? log_buf : buf; + opts.log_size = log_buf ? log_sz : buf_sz; + opts.log_level = log_level; + } + + btf->fd = bpf_btf_load(raw_data, raw_size, &opts); if (btf->fd < 0) { - if (!log_buf || errno == ENOSPC) { - log_buf_size = max((__u32)BPF_LOG_BUF_SIZE, - log_buf_size << 1); - free(log_buf); + /* time to turn on verbose mode and try again */ + if (log_level == 0) { + log_level = 1; goto retry_load; } + /* only retry if caller didn't provide custom log_buf, but + * make sure we can never overflow buf_sz + */ + if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2) + goto retry_load; err = -errno; - pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno); - if (*log_buf) - pr_warn("%s\n", log_buf); - goto done; + pr_warn("BTF loading error: %d\n", err); + /* don't print out contents of custom log_buf */ + if (!log_buf && buf[0]) + pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf); } done: - free(log_buf); + free(buf); return libbpf_err(err); } + +int btf__load_into_kernel(struct btf *btf) +{ + return btf_load_into_kernel(btf, NULL, 0, 0); +} + int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel"))); int btf__fd(const struct btf *btf) @@ -2730,15 +2763,11 @@ void btf_ext__free(struct btf_ext *btf_ext) free(btf_ext); } -struct btf_ext *btf_ext__new(__u8 *data, __u32 size) +struct btf_ext *btf_ext__new(const __u8 *data, __u32 size) { struct btf_ext *btf_ext; int err; - err = btf_ext_parse_hdr(data, size); - if (err) - return libbpf_err_ptr(err); - btf_ext = calloc(1, sizeof(struct btf_ext)); if (!btf_ext) return libbpf_err_ptr(-ENOMEM); @@ -2751,6 +2780,10 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size) } memcpy(btf_ext->data, data, size); + err = btf_ext_parse_hdr(btf_ext->data, size); + if (err) + goto done; + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { err = 
-EINVAL; goto done; @@ -3074,7 +3107,7 @@ done: return libbpf_err(err); } -COMPAT_VERSION(bpf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2) +COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2) int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts) { LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext); @@ -3476,8 +3509,8 @@ static long btf_hash_struct(struct btf_type *t) } /* - * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type - * IDs. This check is performed during type graph equivalence check and + * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced + * type IDs. This check is performed during type graph equivalence check and * referenced types equivalence is checked separately. */ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) @@ -3850,6 +3883,31 @@ static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) return btf_equal_array(t1, t2); } +/* Check if given two types are identical STRUCT/UNION definitions */ +static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2) +{ + const struct btf_member *m1, *m2; + struct btf_type *t1, *t2; + int n, i; + + t1 = btf_type_by_id(d->btf, id1); + t2 = btf_type_by_id(d->btf, id2); + + if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2)) + return false; + + if (!btf_shallow_equal_struct(t1, t2)) + return false; + + m1 = btf_members(t1); + m2 = btf_members(t2); + for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) { + if (m1->type != m2->type) + return false; + } + return true; +} + /* * Check equivalence of BTF type graph formed by candidate struct/union (we'll * call it "candidate graph" in this description for brevity) to a type graph @@ -3961,6 +4019,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, hypot_type_id = d->hypot_map[canon_id]; if (hypot_type_id <= BTF_MAX_NR_TYPES) { + if (hypot_type_id == cand_id) + return 1; /* In some cases compiler will generate different DWARF types * for *identical* array type definitions and use them for * different fields within the *same* struct. This breaks type @@ -3969,8 +4029,18 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, * types within a single CU. So work around that by explicitly * allowing identical array types here. */ - return hypot_type_id == cand_id || - btf_dedup_identical_arrays(d, hypot_type_id, cand_id); + if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id)) + return 1; + /* It turns out that a similar situation can happen with + * struct/union sometimes, sigh... Handle the case where + * structs/unions are exactly the same, down to the referenced + * type IDs. Anything more complicated (e.g., if referenced + * types are different, but equivalent) is *way more* + * complicated and requires a many-to-many equivalence mapping. 
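+ * (For instance, two compilation units can each emit their own copy of an identical 'struct s { int x; };'; the two STRUCTs then differ only in their own type IDs.)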
+ */ + if (btf_dedup_identical_structs(d, hypot_type_id, cand_id)) + return 1; + return 0; } if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) @@ -4023,6 +4093,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: + case BTF_KIND_TYPE_TAG: if (cand_type->info != canon_type->info) return 0; return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 5c73a5b0a044..742a2bf71c5e 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -157,7 +157,7 @@ LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, __u32 expected_value_size, __u32 *key_type_id, __u32 *value_type_id); -LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size); +LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 05f3e7dfec0a..f06a1d343c92 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -2216,7 +2216,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d, __u8 bits_offset, __u8 bit_sz) { - int size, err; + int size, err = 0; size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset); if (size < 0) diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index c28ab24d1bff..8ed02e89c9a9 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -445,47 +445,33 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data, } void bpf_gen__map_create(struct bpf_gen *gen, - struct bpf_create_map_params *map_attr, int map_idx) + enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, __u32 value_size, __u32 max_entries, + struct bpf_map_create_opts *map_attr, int map_idx) { - int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id); + int attr_size = offsetofend(union bpf_attr, map_extra); bool close_inner_map_fd = false; int map_create_attr, idx; union bpf_attr attr; memset(&attr, 0, attr_size); - attr.map_type = map_attr->map_type; - attr.key_size = map_attr->key_size; - attr.value_size = map_attr->value_size; + attr.map_type = map_type; + attr.key_size = key_size; + attr.value_size = value_size; attr.map_flags = map_attr->map_flags; attr.map_extra = map_attr->map_extra; - memcpy(attr.map_name, map_attr->name, - min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1)); + if (map_name) + memcpy(attr.map_name, map_name, + min((unsigned)strlen(map_name), BPF_OBJ_NAME_LEN - 1)); attr.numa_node = map_attr->numa_node; attr.map_ifindex = map_attr->map_ifindex; - attr.max_entries = map_attr->max_entries; - switch (attr.map_type) { - case BPF_MAP_TYPE_PERF_EVENT_ARRAY: - case BPF_MAP_TYPE_CGROUP_ARRAY: - case BPF_MAP_TYPE_STACK_TRACE: - case BPF_MAP_TYPE_ARRAY_OF_MAPS: - case BPF_MAP_TYPE_HASH_OF_MAPS: - case BPF_MAP_TYPE_DEVMAP: - case BPF_MAP_TYPE_DEVMAP_HASH: - case BPF_MAP_TYPE_CPUMAP: - case BPF_MAP_TYPE_XSKMAP: - case BPF_MAP_TYPE_SOCKMAP: - case BPF_MAP_TYPE_SOCKHASH: - case BPF_MAP_TYPE_QUEUE: - case BPF_MAP_TYPE_STACK: - case BPF_MAP_TYPE_RINGBUF: - break; - default: - attr.btf_key_type_id = map_attr->btf_key_type_id; - attr.btf_value_type_id = map_attr->btf_value_type_id; - } + attr.max_entries = max_entries; + attr.btf_key_type_id = map_attr->btf_key_type_id; + attr.btf_value_type_id = 
map_attr->btf_value_type_id; pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n", - attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id); + attr.map_name, map_idx, map_type, attr.btf_value_type_id); map_create_attr = add_data(gen, &attr, attr_size); if (attr.btf_value_type_id) @@ -512,7 +498,7 @@ void bpf_gen__map_create(struct bpf_gen *gen, /* emit MAP_CREATE command */ emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size); debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d", - attr.map_name, map_idx, map_attr->map_type, attr.value_size, + attr.map_name, map_idx, map_type, value_size, attr.btf_value_type_id); emit_check_err(gen); /* remember map_fd in the stack, if successful */ @@ -701,27 +687,29 @@ static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo return; } kdesc->off = btf_fd_idx; - /* set a default value for imm */ + /* jump to success case */ + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); + /* set value for imm, off as 0 */ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); - /* skip success case store if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 1)); + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); + /* skip success case for ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10)); /* store btf_id into insn[insn_idx].imm */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); + /* obtain fd in BPF_REG_9 */ + emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7)); + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); + /* jump to fd_array store if fd denotes module BTF */ + emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2)); + /* set the default value for off */ + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); + /* skip BTF fd store for vmlinux BTF */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4)); /* load fd_array slot pointer */ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx))); - /* skip store of BTF fd if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 3)); /* store BTF fd in slot */ - emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7)); - emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0)); - /* set a default value for off */ - emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); - /* skip insn->off store if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 2)); - /* skip if vmlinux BTF */ - emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1)); /* store index into insn[insn_idx].off */ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx)); log: @@ -820,9 +808,8 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, kdesc->insn + offsetof(struct bpf_insn, imm)); move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); - emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm))); - /* jump over src_reg adjustment if imm is not 0 */ - emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3)); + /* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */ + emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3)); goto clear_src_reg; } /* remember insn offset, so we can copy BTF ID and FD later */ @@ -830,17 +817,20 @@ static void 
emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, emit_bpf_find_by_name_kind(gen, relo); if (!relo->is_weak) emit_check_err(gen); - /* set default values as 0 */ + /* jump to success case */ + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); + /* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0)); - /* skip success case stores if ret < 0 */ - emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4)); + /* skip success case for ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4)); /* store btf_id into insn[insn_idx].imm */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); /* store btf_obj_fd into insn[insn_idx + 1].imm */ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); + /* skip src_reg adjustment */ emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); clear_src_reg: /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */ @@ -852,6 +842,22 @@ clear_src_reg: emit_ksym_relo_log(gen, relo, kdesc->ref); } +void bpf_gen__record_relo_core(struct bpf_gen *gen, + const struct bpf_core_relo *core_relo) +{ + struct bpf_core_relo *relos; + + relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos)); + if (!relos) { + gen->error = -ENOMEM; + return; + } + gen->core_relos = relos; + relos += gen->core_relo_cnt; + memcpy(relos, core_relo, sizeof(*relos)); + gen->core_relo_cnt++; +} + static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns) { int insn; @@ -884,6 +890,15 @@ static void emit_relos(struct bpf_gen *gen, int insns) emit_relo(gen, gen->relos + i, insns); } +static void cleanup_core_relo(struct bpf_gen *gen) +{ + if (!gen->core_relo_cnt) + return; + free(gen->core_relos); + gen->core_relo_cnt = 0; + gen->core_relos = NULL; +} + static void cleanup_relos(struct bpf_gen *gen, int insns) { int i, insn; @@ -911,6 +926,7 @@ static void cleanup_relos(struct bpf_gen *gen, int insns) gen->relo_cnt = 0; gen->relos = NULL; } + cleanup_core_relo(gen); } void bpf_gen__prog_load(struct bpf_gen *gen, @@ -918,12 +934,13 @@ void bpf_gen__prog_load(struct bpf_gen *gen, const char *license, struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *load_attr, int prog_idx) { - int attr_size = offsetofend(union bpf_attr, fd_array); - int prog_load_attr, license_off, insns_off, func_info, line_info; + int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos; + int attr_size = offsetofend(union bpf_attr, core_relo_rec_size); union bpf_attr attr; memset(&attr, 0, attr_size); - pr_debug("gen: prog_load: type %d insns_cnt %zd\n", prog_type, insn_cnt); + pr_debug("gen: prog_load: type %d insns_cnt %zd prog_idx %d\n", + prog_type, insn_cnt, prog_idx); /* add license string to blob of bytes */ license_off = add_data(gen, license, strlen(license) + 1); /* add insns to blob of bytes */ @@ -947,6 +964,11 @@ void bpf_gen__prog_load(struct bpf_gen *gen, line_info = add_data(gen, load_attr->line_info, attr.line_info_cnt * attr.line_info_rec_size); + attr.core_relo_rec_size = sizeof(struct bpf_core_relo); + attr.core_relo_cnt = gen->core_relo_cnt; + core_relos = add_data(gen, gen->core_relos, + attr.core_relo_cnt * attr.core_relo_rec_size); + 
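/*
 * The records accumulated by bpf_gen__record_relo_core() are plain UAPI
 * struct bpf_core_relo entries; the field values below are made up purely
 * for illustration:
 *
 *	struct bpf_core_relo cr = {
 *		.insn_off	= 5 * 8,
 *		.type_id	= 42,
 *		.access_str_off	= 100,
 *		.kind		= BPF_CORE_FIELD_BYTE_OFFSET,
 *	};
 *
 * bpf_gen__prog_load() embeds the whole array into the loader blob and
 * points core_relos/core_relo_cnt/core_relo_rec_size at it, as wired up in
 * the surrounding hunk.
 */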
memcpy(attr.prog_name, prog_name, min((unsigned)strlen(prog_name), BPF_OBJ_NAME_LEN - 1)); prog_load_attr = add_data(gen, &attr, attr_size); @@ -963,6 +985,9 @@ void bpf_gen__prog_load(struct bpf_gen *gen, /* populate union bpf_attr with a pointer to line_info */ emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info); + /* populate union bpf_attr with a pointer to core_relos */ + emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos); + /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */ emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array); @@ -993,9 +1018,11 @@ void bpf_gen__prog_load(struct bpf_gen *gen, debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt); /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */ cleanup_relos(gen, insns_off); - if (gen->attach_kind) + if (gen->attach_kind) { emit_sys_close_blob(gen, attr_field(prog_load_attr, attach_btf_obj_fd)); + gen->attach_kind = 0; + } emit_check_err(gen); /* remember prog_fd in the stack, if successful */ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, @@ -1041,6 +1068,33 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, emit_check_err(gen); } +void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot, + int inner_map_idx) +{ + int attr_size = offsetofend(union bpf_attr, flags); + int map_update_attr, key; + union bpf_attr attr; + + memset(&attr, 0, attr_size); + pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n", + outer_map_idx, slot, inner_map_idx); + + key = add_data(gen, &slot, sizeof(slot)); + + map_update_attr = add_data(gen, &attr, attr_size); + move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, + blob_fd_array_off(gen, outer_map_idx)); + emit_rel_store(gen, attr_field(map_update_attr, key), key); + emit_rel_store(gen, attr_field(map_update_attr, value), + blob_fd_array_off(gen, inner_map_idx)); + + /* emit MAP_UPDATE_ELEM command */ + emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size); + debug_ret(gen, "populate_outer_map outer %d key %d inner %d", + outer_map_idx, slot, inner_map_idx); + emit_check_err(gen); +} + void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx) { int attr_size = offsetofend(union bpf_attr, map_fd); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f6faa33c80fa..902f1ad5b7e6 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -168,6 +168,25 @@ int libbpf_set_strict_mode(enum libbpf_strict_mode mode) return 0; } +__u32 libbpf_major_version(void) +{ + return LIBBPF_MAJOR_VERSION; +} + +__u32 libbpf_minor_version(void) +{ + return LIBBPF_MINOR_VERSION; +} + +const char *libbpf_version_string(void) +{ +#define __S(X) #X +#define _S(X) __S(X) + return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION); +#undef _S +#undef __S +} + enum kern_feature_id { /* v4.14: kernel support for program & map names. 
*/ FEAT_PROG_NAME, @@ -211,13 +230,19 @@ enum reloc_type { RELO_EXTERN_VAR, RELO_EXTERN_FUNC, RELO_SUBPROG_ADDR, + RELO_CORE, }; struct reloc_desc { enum reloc_type type; int insn_idx; - int map_idx; - int sym_off; + union { + const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */ + struct { + int map_idx; + int sym_off; + }; + }; }; struct bpf_sec_def; @@ -306,7 +331,11 @@ struct bpf_program { struct reloc_desc *reloc_desc; int nr_reloc; - int log_level; + + /* BPF verifier log settings */ + char *log_buf; + size_t log_size; + __u32 log_level; struct { int nr; @@ -402,6 +431,7 @@ struct bpf_map { char *pin_path; bool pinned; bool reused; + bool skipped; __u64 map_extra; }; @@ -548,6 +578,11 @@ struct bpf_object { size_t btf_module_cnt; size_t btf_module_cap; + /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */ + char *log_buf; + size_t log_size; + __u32 log_level; + void *priv; bpf_object_clear_priv_t clear_priv; @@ -683,6 +718,9 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog, prog->instances.fds = NULL; prog->instances.nr = -1; + /* inherit object's log_level */ + prog->log_level = obj->log_level; + prog->sec_name = strdup(sec_name); if (!prog->sec_name) goto errout; @@ -2258,6 +2296,9 @@ int parse_btf_map_def(const char *map_name, struct btf *btf, map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; } else if (strcmp(name, "values") == 0) { + bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); + bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; + const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value"; char inner_map_name[128]; int err; @@ -2271,8 +2312,8 @@ int parse_btf_map_def(const char *map_name, struct btf *btf, map_name, name); return -EINVAL; } - if (!bpf_map_type__is_map_in_map(map_def->map_type)) { - pr_warn("map '%s': should be map-in-map.\n", + if (!is_map_in_map && !is_prog_array) { + pr_warn("map '%s': should be map-in-map or prog-array.\n", map_name); return -ENOTSUP; } @@ -2284,22 +2325,30 @@ int parse_btf_map_def(const char *map_name, struct btf *btf, map_def->value_size = 4; t = btf__type_by_id(btf, m->type); if (!t) { - pr_warn("map '%s': map-in-map inner type [%d] not found.\n", - map_name, m->type); + pr_warn("map '%s': %s type [%d] not found.\n", + map_name, desc, m->type); return -EINVAL; } if (!btf_is_array(t) || btf_array(t)->nelems) { - pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", - map_name); + pr_warn("map '%s': %s spec is not a zero-sized array.\n", + map_name, desc); return -EINVAL; } t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); if (!btf_is_ptr(t)) { - pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", - map_name, btf_kind_str(t)); + pr_warn("map '%s': %s def is of unexpected kind %s.\n", + map_name, desc, btf_kind_str(t)); return -EINVAL; } t = skip_mods_and_typedefs(btf, t->type, NULL); + if (is_prog_array) { + if (!btf_is_func_proto(t)) { + pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", + map_name, btf_kind_str(t)); + return -EINVAL; + } + continue; + } if (!btf_is_struct(t)) { pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", map_name, btf_kind_str(t)); @@ -2981,7 +3030,9 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) */ btf__set_fd(kern_btf, 0); } else { - err = btf__load_into_kernel(kern_btf); + /* currently BPF_BTF_LOAD only supports log_level 1 */ + err = btf_load_into_kernel(kern_btf, 
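/*
 * The prog-array branch added to parse_btf_map_def() above enables
 * declarative initialization of BPF_MAP_TYPE_PROG_ARRAY slots from BPF-side
 * code, mirroring map-in-map "values". A sketch of what this accepts,
 * assuming the usual __uint()/__array() helpers from bpf_helpers.h
 * (program and map names are illustrative):
 *
 *	SEC("socket") int tail_call_target(void *ctx) { return 0; }
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 2);
 *		__uint(key_size, sizeof(__u32));
 *		__array(values, int (void *));
 *	} prog_arr SEC(".maps") = {
 *		.values = { [1] = (void *)&tail_call_target },
 *	};
 *
 * The values element must be a pointer to a func proto, and the referenced
 * program must be an entry-point program in the same object, as enforced in
 * bpf_object__collect_map_relos() further down.
 */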
obj->log_buf, obj->log_size, + obj->log_level ? 1 : 0); } if (sanitize) { if (!err) { @@ -3350,7 +3401,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj) /* sort BPF programs by section name and in-section instruction offset * for faster search */ - qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); + if (obj->nr_programs) + qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); return bpf_object__init_btf(obj, btf_data, btf_ext_data); } @@ -4342,7 +4394,6 @@ static int probe_kern_prog_name(void) static int probe_kern_global_data(void) { - struct bpf_create_map_attr map_attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16), @@ -4352,13 +4403,7 @@ static int probe_kern_global_data(void) }; int ret, map, insn_cnt = ARRAY_SIZE(insns); - memset(&map_attr, 0, sizeof(map_attr)); - map_attr.map_type = BPF_MAP_TYPE_ARRAY; - map_attr.key_size = sizeof(int); - map_attr.value_size = 32; - map_attr.max_entries = 1; - - map = bpf_create_map_xattr(&map_attr); + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL); if (map < 0) { ret = -errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); @@ -4488,15 +4533,11 @@ static int probe_kern_btf_type_tag(void) static int probe_kern_array_mmap(void) { - struct bpf_create_map_attr attr = { - .map_type = BPF_MAP_TYPE_ARRAY, - .map_flags = BPF_F_MMAPABLE, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 1, - }; + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE); + int fd; - return probe_fd(bpf_create_map_xattr(&attr)); + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts); + return probe_fd(fd); } static int probe_kern_exp_attach_type(void) @@ -4535,7 +4576,6 @@ static int probe_kern_probe_read_kernel(void) static int probe_prog_bind_map(void) { - struct bpf_create_map_attr map_attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), @@ -4543,13 +4583,7 @@ static int probe_prog_bind_map(void) }; int ret, map, prog, insn_cnt = ARRAY_SIZE(insns); - memset(&map_attr, 0, sizeof(map_attr)); - map_attr.map_type = BPF_MAP_TYPE_ARRAY; - map_attr.key_size = sizeof(int); - map_attr.value_size = 32; - map_attr.max_entries = 1; - - map = bpf_create_map_xattr(&map_attr); + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL); if (map < 0) { ret = -errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); @@ -4820,19 +4854,16 @@ static void bpf_map__destroy(struct bpf_map *map); static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) { - struct bpf_create_map_params create_attr; + LIBBPF_OPTS(bpf_map_create_opts, create_attr); struct bpf_map_def *def = &map->def; + const char *map_name = NULL; + __u32 max_entries; int err = 0; - memset(&create_attr, 0, sizeof(create_attr)); - if (kernel_supports(obj, FEAT_PROG_NAME)) - create_attr.name = map->name; + map_name = map->name; create_attr.map_ifindex = map->map_ifindex; - create_attr.map_type = def->type; create_attr.map_flags = def->map_flags; - create_attr.key_size = def->key_size; - create_attr.value_size = def->value_size; create_attr.numa_node = map->numa_node; create_attr.map_extra = map->map_extra; @@ -4846,18 +4877,14 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b return nr_cpus; } pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); - create_attr.max_entries = nr_cpus; + 
max_entries = nr_cpus; } else { - create_attr.max_entries = def->max_entries; + max_entries = def->max_entries; } if (bpf_map__is_struct_ops(map)) - create_attr.btf_vmlinux_value_type_id = - map->btf_vmlinux_value_type_id; + create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; - create_attr.btf_fd = 0; - create_attr.btf_key_type_id = 0; - create_attr.btf_value_type_id = 0; if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; @@ -4903,13 +4930,17 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b } if (obj->gen_loader) { - bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps); + bpf_gen__map_create(obj->gen_loader, def->type, map_name, + def->key_size, def->value_size, max_entries, + &create_attr, is_inner ? -1 : map - obj->maps); /* Pretend to have valid FD to pass various fd >= 0 checks. * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. */ map->fd = 0; } else { - map->fd = libbpf__bpf_create_map_xattr(&create_attr); + map->fd = bpf_map_create(def->type, map_name, + def->key_size, def->value_size, + max_entries, &create_attr); } if (map->fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { @@ -4924,7 +4955,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.btf_value_type_id = 0; map->btf_key_type_id = 0; map->btf_value_type_id = 0; - map->fd = libbpf__bpf_create_map_xattr(&create_attr); + map->fd = bpf_map_create(def->type, map_name, + def->key_size, def->value_size, + max_entries, &create_attr); } err = map->fd < 0 ? -errno : 0; @@ -4939,7 +4972,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b return err; } -static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) +static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) { const struct bpf_map *targ_map; unsigned int i; @@ -4951,18 +4984,18 @@ static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) targ_map = map->init_slots[i]; fd = bpf_map__fd(targ_map); + if (obj->gen_loader) { - pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n", - map - obj->maps, i, targ_map - obj->maps); - return -ENOTSUP; + bpf_gen__populate_outer_map(obj->gen_loader, + map - obj->maps, i, + targ_map - obj->maps); } else { err = bpf_map_update_elem(map->fd, &i, &fd, 0); } if (err) { err = -errno; pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", - map->name, i, targ_map->name, - fd, err); + map->name, i, targ_map->name, fd, err); return err; } pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", @@ -4975,6 +5008,59 @@ static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) return 0; } +static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) +{ + const struct bpf_program *targ_prog; + unsigned int i; + int fd, err; + + if (obj->gen_loader) + return -ENOTSUP; + + for (i = 0; i < map->init_slots_sz; i++) { + if (!map->init_slots[i]) + continue; + + targ_prog = map->init_slots[i]; + fd = bpf_program__fd(targ_prog); + + err = bpf_map_update_elem(map->fd, &i, &fd, 0); + if (err) { + err = -errno; + pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n", + map->name, i, targ_prog->name, fd, err); + return err; + } + pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", + 
map->name, i, targ_prog->name, fd); + } + + zfree(&map->init_slots); + map->init_slots_sz = 0; + + return 0; +} + +static int bpf_object_init_prog_arrays(struct bpf_object *obj) +{ + struct bpf_map *map; + int i, err; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + + if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) + continue; + + err = init_prog_array_slots(obj, map); + if (err < 0) { + zclose(map->fd); + return err; + } + } + return 0; +} + static int bpf_object__create_maps(struct bpf_object *obj) { @@ -4987,6 +5073,26 @@ bpf_object__create_maps(struct bpf_object *obj) for (i = 0; i < obj->nr_maps; i++) { map = &obj->maps[i]; + /* To support old kernels, we skip creating global data maps + * (.rodata, .data, .kconfig, etc); later on, during program + * loading, if we detect that at least one of the to-be-loaded + * programs is referencing any global data map, we'll error + * out with program name and relocation index logged. + * This approach allows us to accommodate Clang emitting + * unnecessary .rodata.str1.1 sections for string literals, + * and also allows CO-RE applications to use global + * variables in some BPF programs but not others. + * If those global variable-using programs are not loaded at + * runtime due to bpf_program__set_autoload(prog, false), + * bpf_object loading will succeed just fine even on old + * kernels. + */ + if (bpf_map__is_internal(map) && + !kernel_supports(obj, FEAT_GLOBAL_DATA)) { + map->skipped = true; + continue; + } + retried = false; retry: if (map->pin_path) { @@ -5023,8 +5129,8 @@ retry: } } - if (map->init_slots_sz) { - err = init_map_slots(obj, map); + if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { + err = init_map_in_map_slots(obj, map); if (err < 0) { zclose(map->fd); goto err_out; @@ -5096,15 +5202,18 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand, struct bpf_core_cand_list *cands) { struct bpf_core_cand *new_cands, *cand; - const struct btf_type *t; - const char *targ_name; + const struct btf_type *t, *local_t; + const char *targ_name, *local_name; size_t targ_essent_len; int n, i; + local_t = btf__type_by_id(local_cand->btf, local_cand->id); + local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); + n = btf__type_cnt(targ_btf); for (i = targ_start_id; i < n; i++) { t = btf__type_by_id(targ_btf, i); - if (btf_kind(t) != btf_kind(local_cand->t)) + if (btf_kind(t) != btf_kind(local_t)) continue; targ_name = btf__name_by_offset(targ_btf, t->name_off); @@ -5115,12 +5224,12 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand, if (targ_essent_len != local_essent_len) continue; - if (strncmp(local_cand->name, targ_name, local_essent_len) != 0) + if (strncmp(local_name, targ_name, local_essent_len) != 0) continue; pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", - local_cand->id, btf_kind_str(local_cand->t), - local_cand->name, i, btf_kind_str(t), targ_name, + local_cand->id, btf_kind_str(local_t), + local_name, i, btf_kind_str(t), targ_name, targ_btf_name); new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, sizeof(*cands->cands)); @@ -5129,8 +5238,6 @@ static int bpf_core_add_cands(struct bpf_core_cand *local_cand, cand = &new_cands[cands->len]; cand->btf = targ_btf; - cand->t = t; - cand->name = targ_name; cand->id = i; cands->cands = new_cands; @@ -5237,18 +5344,21 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l struct bpf_core_cand local_cand 
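/*
 * Tying the skip logic above to user code: on kernels without global-data
 * support the internal maps are only marked skipped, and loading fails
 * later only for programs that actually reference them. Callers can opt
 * such programs out entirely (the program name here is illustrative):
 *
 *	struct bpf_program *p;
 *
 *	p = bpf_object__find_program_by_name(obj, "uses_global_data");
 *	if (p)
 *		bpf_program__set_autoload(p, false);
 *
 * done after bpf_object__open() and before bpf_object__load(), letting the
 * rest of the object load fine on old kernels.
 */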
= {}; struct bpf_core_cand_list *cands; const struct btf *main_btf; + const struct btf_type *local_t; + const char *local_name; size_t local_essent_len; int err, i; local_cand.btf = local_btf; - local_cand.t = btf__type_by_id(local_btf, local_type_id); - if (!local_cand.t) + local_cand.id = local_type_id; + local_t = btf__type_by_id(local_btf, local_type_id); + if (!local_t) return ERR_PTR(-EINVAL); - local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off); - if (str_is_empty(local_cand.name)) + local_name = btf__name_by_offset(local_btf, local_t->name_off); + if (str_is_empty(local_name)) return ERR_PTR(-EINVAL); - local_essent_len = bpf_core_essential_name_len(local_cand.name); + local_essent_len = bpf_core_essential_name_len(local_name); cands = calloc(1, sizeof(*cands)); if (!cands) @@ -5398,12 +5508,31 @@ static void *u32_as_hash_key(__u32 x) return (void *)(uintptr_t)x; } +static int record_relo_core(struct bpf_program *prog, + const struct bpf_core_relo *core_relo, int insn_idx) +{ + struct reloc_desc *relos, *relo; + + relos = libbpf_reallocarray(prog->reloc_desc, + prog->nr_reloc + 1, sizeof(*relos)); + if (!relos) + return -ENOMEM; + relo = &relos[prog->nr_reloc]; + relo->type = RELO_CORE; + relo->insn_idx = insn_idx; + relo->core_relo = core_relo; + prog->reloc_desc = relos; + prog->nr_reloc++; + return 0; +} + static int bpf_core_apply_relo(struct bpf_program *prog, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, struct hashmap *cand_cache) { + struct bpf_core_spec specs_scratch[3] = {}; const void *type_key = u32_as_hash_key(relo->type_id); struct bpf_core_cand_list *cands = NULL; const char *prog_name = prog->name; @@ -5434,13 +5563,15 @@ static int bpf_core_apply_relo(struct bpf_program *prog, return -EINVAL; if (prog->obj->gen_loader) { - pr_warn("// TODO core_relo: prog %td insn[%d] %s kind %d\n", + const char *spec_str = btf__name_by_offset(local_btf, relo->access_str_off); + + pr_debug("record_relo_core: prog %td insn[%d] %s %s %s final insn_idx %d\n", prog - prog->obj->programs, relo->insn_off / 8, - local_name, relo->kind); - return -ENOTSUP; + btf_kind_str(local_type), local_name, spec_str, insn_idx); + return record_relo_core(prog, relo, insn_idx); } - if (relo->kind != BPF_TYPE_ID_LOCAL && + if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && !hashmap__find(cand_cache, type_key, (void **)&cands)) { cands = bpf_core_find_cands(prog->obj, local_btf, local_id); if (IS_ERR(cands)) { @@ -5456,7 +5587,8 @@ static int bpf_core_apply_relo(struct bpf_program *prog, } } - return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, relo_idx, local_btf, cands); + return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, + relo_idx, local_btf, cands, specs_scratch); } static int @@ -5586,6 +5718,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; insn[0].imm = relo->map_idx; } else { + const struct bpf_map *map = &obj->maps[relo->map_idx]; + + if (map->skipped) { + pr_warn("prog '%s': relo #%d: kernel doesn't support global data\n", + prog->name, i); + return -ENOTSUP; + } insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; insn[0].imm = obj->maps[relo->map_idx].fd; } @@ -5634,6 +5773,9 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) case RELO_CALL: /* handled already */ break; + case RELO_CORE: + /* will be handled by bpf_program_record_relos() */ + break; default: pr_warn("prog '%s': relo #%d: bad relo type %d\n", prog->name, i, 
relo->type); @@ -5797,6 +5939,8 @@ static int cmp_relo_by_insn_idx(const void *key, const void *elem) static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) { + if (!prog->nr_reloc) + return NULL; return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); } @@ -5812,8 +5956,9 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); if (!relos) return -ENOMEM; - memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, - sizeof(*relos) * subprog->nr_reloc); + if (subprog->nr_reloc) + memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, + sizeof(*relos) * subprog->nr_reloc); for (i = main_prog->nr_reloc; i < new_cnt; i++) relos[i].insn_idx += subprog->sub_insn_off; @@ -6071,6 +6216,35 @@ bpf_object__free_relocs(struct bpf_object *obj) } } +static int cmp_relocs(const void *_a, const void *_b) +{ + const struct reloc_desc *a = _a; + const struct reloc_desc *b = _b; + + if (a->insn_idx != b->insn_idx) + return a->insn_idx < b->insn_idx ? -1 : 1; + + /* no two relocations should have the same insn_idx, but ... */ + if (a->type != b->type) + return a->type < b->type ? -1 : 1; + + return 0; +} + +static void bpf_object__sort_relos(struct bpf_object *obj) +{ + int i; + + for (i = 0; i < obj->nr_programs; i++) { + struct bpf_program *p = &obj->programs[i]; + + if (!p->nr_reloc) + continue; + + qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); + } +} + static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) { @@ -6085,6 +6259,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) err); return err; } + if (obj->gen_loader) + bpf_object__sort_relos(obj); } /* Before relocating calls pre-process relocations and mark @@ -6120,6 +6296,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) */ if (prog_is_subprog(obj, prog)) continue; + if (!prog->load) + continue; err = bpf_object__relocate_calls(obj, prog); if (err) { @@ -6133,6 +6311,8 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) prog = &obj->programs[i]; if (prog_is_subprog(obj, prog)) continue; + if (!prog->load) + continue; err = bpf_object__relocate_data(obj, prog); if (err) { pr_warn("prog '%s': failed to relocate data references: %d\n", @@ -6155,9 +6335,11 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, int i, j, nrels, new_sz; const struct btf_var_secinfo *vi = NULL; const struct btf_type *sec, *var, *def; - struct bpf_map *map = NULL, *targ_map; + struct bpf_map *map = NULL, *targ_map = NULL; + struct bpf_program *targ_prog = NULL; + bool is_prog_array, is_map_in_map; const struct btf_member *member; - const char *name, *mname; + const char *name, *mname, *type; unsigned int moff; Elf64_Sym *sym; Elf64_Rel *rel; @@ -6184,11 +6366,6 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, return -LIBBPF_ERRNO__FORMAT; } name = elf_sym_str(obj, sym->st_name) ?: "<?>"; - if (sym->st_shndx != obj->efile.btf_maps_shndx) { - pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", - i, name); - return -LIBBPF_ERRNO__RELOC; - } pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, @@ -6210,19 +6387,45 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, return -EINVAL; } - if 
(!bpf_map_type__is_map_in_map(map->def.type)) - return -EINVAL; - if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && - map->def.key_size != sizeof(int)) { - pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", - i, map->name, sizeof(int)); + is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); + is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; + type = is_map_in_map ? "map" : "prog"; + if (is_map_in_map) { + if (sym->st_shndx != obj->efile.btf_maps_shndx) { + pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", + i, name); + return -LIBBPF_ERRNO__RELOC; + } + if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && + map->def.key_size != sizeof(int)) { + pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", + i, map->name, sizeof(int)); + return -EINVAL; + } + targ_map = bpf_object__find_map_by_name(obj, name); + if (!targ_map) { + pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", + i, name); + return -ESRCH; + } + } else if (is_prog_array) { + targ_prog = bpf_object__find_program_by_name(obj, name); + if (!targ_prog) { + pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", + i, name); + return -ESRCH; + } + if (targ_prog->sec_idx != sym->st_shndx || + targ_prog->sec_insn_off * 8 != sym->st_value || + prog_is_subprog(obj, targ_prog)) { + pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", + i, name); + return -LIBBPF_ERRNO__RELOC; + } + } else { return -EINVAL; } - targ_map = bpf_object__find_map_by_name(obj, name); - if (!targ_map) - return -ESRCH; - var = btf__type_by_id(obj->btf, vi->type); def = skip_mods_and_typedefs(obj->btf, var->type, NULL); if (btf_vlen(def) == 0) @@ -6253,30 +6456,15 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, (new_sz - map->init_slots_sz) * host_ptr_sz); map->init_slots_sz = new_sz; } - map->init_slots[moff] = targ_map; + map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog; - pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n", - i, map->name, moff, name); + pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", + i, map->name, moff, type, name); } return 0; } -static int cmp_relocs(const void *_a, const void *_b) -{ - const struct reloc_desc *a = _a; - const struct reloc_desc *b = _b; - - if (a->insn_idx != b->insn_idx) - return a->insn_idx < b->insn_idx ? -1 : 1; - - /* no two relocations should have the same insn_idx, but ... */ - if (a->type != b->type) - return a->type < b->type ? 
-1 : 1; - - return 0; -} - static int bpf_object__collect_relos(struct bpf_object *obj) { int i, err; @@ -6309,14 +6497,7 @@ static int bpf_object__collect_relos(struct bpf_object *obj) return err; } - for (i = 0; i < obj->nr_programs; i++) { - struct bpf_program *p = &obj->programs[i]; - - if (!p->nr_reloc) - continue; - - qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); - } + bpf_object__sort_relos(obj); return 0; } @@ -6419,8 +6600,10 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog const char *prog_name = NULL; char *cp, errmsg[STRERR_BUFSIZE]; size_t log_buf_size = 0; - char *log_buf = NULL; + char *log_buf = NULL, *tmp; int btf_fd, ret, err; + bool own_log_buf = true; + __u32 log_level = prog->log_level; if (prog->type == BPF_PROG_TYPE_UNSPEC) { /* @@ -6438,7 +6621,6 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog load_attr.expected_attach_type = prog->expected_attach_type; if (kernel_supports(obj, FEAT_PROG_NAME)) prog_name = prog->name; - load_attr.attach_btf_id = prog->attach_btf_id; load_attr.attach_prog_fd = prog->attach_prog_fd; load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; load_attr.attach_btf_id = prog->attach_btf_id; @@ -6456,7 +6638,7 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog load_attr.line_info_rec_size = prog->line_info_rec_size; load_attr.line_info_cnt = prog->line_info_cnt; } - load_attr.log_level = prog->log_level; + load_attr.log_level = log_level; load_attr.prog_flags = prog->prog_flags; load_attr.fd_array = obj->fd_array; @@ -6477,22 +6659,45 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog *prog_fd = -1; return 0; } -retry_load: - if (log_buf_size) { - log_buf = malloc(log_buf_size); - if (!log_buf) - return -ENOMEM; - *log_buf = 0; +retry_load: + /* if log_level is zero, we don't request logs initially even if + * custom log_buf is specified; if the program load fails, then we'll + * bump log_level to 1 and use either custom log_buf or we'll allocate + * our own and retry the load to get details on what failed + */ + if (log_level) { + if (prog->log_buf) { + log_buf = prog->log_buf; + log_buf_size = prog->log_size; + own_log_buf = false; + } else if (obj->log_buf) { + log_buf = obj->log_buf; + log_buf_size = obj->log_size; + own_log_buf = false; + } else { + log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); + tmp = realloc(log_buf, log_buf_size); + if (!tmp) { + ret = -ENOMEM; + goto out; + } + log_buf = tmp; + log_buf[0] = '\0'; + own_log_buf = true; + } } load_attr.log_buf = log_buf; load_attr.log_size = log_buf_size; - ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); + load_attr.log_level = log_level; + ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); if (ret >= 0) { - if (log_buf && load_attr.log_level) - pr_debug("verifier log:\n%s", log_buf); + if (log_level && own_log_buf) { + pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", + prog->name, log_buf); + } if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { struct bpf_map *map; @@ -6505,8 +6710,8 @@ retry_load: if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) { cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("prog '%s': failed to bind .rodata map: %s\n", - prog->name, cp); + pr_warn("prog '%s': failed to bind map '%s': %s\n", + prog->name, map->real_name, cp); /* Don't fail hard if can't 
bind rodata. */ } } @@ -6517,49 +6722,41 @@ retry_load: goto out; } - if (!log_buf || errno == ENOSPC) { - log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, - log_buf_size << 1); - - free(log_buf); + if (log_level == 0) { + log_level = 1; goto retry_load; } - ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; + /* On ENOSPC, increase log buffer size and retry, unless custom + * log_buf is specified. + * Be careful to not overflow u32, though. Kernel's log buf size limit + * isn't part of UAPI so it can always be bumped to full 4GB. So don't + * multiply by 2 unless we are sure we'll fit within 32 bits. + * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). + */ + if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) + goto retry_load; + + ret = -errno; cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("load bpf program failed: %s\n", cp); + pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp); pr_perm_msg(ret); - if (log_buf && log_buf[0] != '\0') { - ret = -LIBBPF_ERRNO__VERIFY; - pr_warn("-- BEGIN DUMP LOG ---\n"); - pr_warn("\n%s\n", log_buf); - pr_warn("-- END LOG --\n"); - } else if (insns_cnt >= BPF_MAXINSNS) { - pr_warn("Program too large (%d insns), at most %d insns\n", - insns_cnt, BPF_MAXINSNS); - ret = -LIBBPF_ERRNO__PROG2BIG; - } else if (prog->type != BPF_PROG_TYPE_KPROBE) { - /* Wrong program type? */ - int fd; - - load_attr.expected_attach_type = 0; - load_attr.log_buf = NULL; - load_attr.log_size = 0; - fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, prog_name, license, - insns, insns_cnt, &load_attr); - if (fd >= 0) { - close(fd); - ret = -LIBBPF_ERRNO__PROGTYPE; - goto out; - } + if (own_log_buf && log_buf && log_buf[0] != '\0') { + pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", + prog->name, log_buf); + } + if (insns_cnt >= BPF_MAXINSNS) { + pr_warn("prog '%s': program too large (%d insns), at most %d insns\n", + prog->name, insns_cnt, BPF_MAXINSNS); } out: - free(log_buf); + if (own_log_buf) + free(log_buf); return ret; } -static int bpf_program__record_externs(struct bpf_program *prog) +static int bpf_program_record_relos(struct bpf_program *prog) { struct bpf_object *obj = prog->obj; int i; @@ -6581,6 +6778,17 @@ static int bpf_program__record_externs(struct bpf_program *prog) ext->is_weak, false, BTF_KIND_FUNC, relo->insn_idx); break; + case RELO_CORE: { + struct bpf_core_relo cr = { + .insn_off = relo->insn_idx * 8, + .type_id = relo->core_relo->type_id, + .access_str_off = relo->core_relo->access_str_off, + .kind = relo->core_relo->kind, + }; + + bpf_gen__record_relo_core(obj->gen_loader, &cr); + break; + } default: continue; } @@ -6620,7 +6828,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog prog->name, prog->instances.nr); } if (obj->gen_loader) - bpf_program__record_externs(prog); + bpf_program_record_relos(prog); err = bpf_object_load_prog_instance(obj, prog, prog->insns, prog->insns_cnt, license, kern_ver, &fd); @@ -6749,14 +6957,16 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object return 0; } -static struct bpf_object * -__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, - const struct bpf_object_open_opts *opts) +static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) { const char *obj_name, *kconfig, *btf_tmp_path; struct bpf_object *obj; char tmp_name[64]; int err; + char *log_buf; + size_t log_size; + __u32 
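/*
 * These per-object and per-program log settings are fed by the new
 * kernel_log_buf/kernel_log_size/kernel_log_level open options, read just
 * below in bpf_object_open(). A caller-side sketch (file name and buffer
 * size are illustrative):
 *
 *	static char log[1024 * 1024];
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.kernel_log_buf = log,
 *		.kernel_log_size = sizeof(log),
 *		.kernel_log_level = 1);
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 *
 * With kernel_log_level 0 the buffer is filled only when a load fails,
 * matching the retry-on-error flow in bpf_object_load_prog_instance() above.
 */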
log_level; if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("failed to init libelf for %s\n", @@ -6779,10 +6989,22 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, pr_debug("loading object '%s' from buffer\n", obj_name); } + log_buf = OPTS_GET(opts, kernel_log_buf, NULL); + log_size = OPTS_GET(opts, kernel_log_size, 0); + log_level = OPTS_GET(opts, kernel_log_level, 0); + if (log_size > UINT_MAX) + return ERR_PTR(-EINVAL); + if (log_size && !log_buf) + return ERR_PTR(-EINVAL); + obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); if (IS_ERR(obj)) return obj; + obj->log_buf = log_buf; + obj->log_size = log_size; + obj->log_level = log_level; + btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); if (btf_tmp_path) { if (strlen(btf_tmp_path) >= PATH_MAX) { @@ -6836,7 +7058,7 @@ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags) return NULL; pr_debug("loading %s\n", attr->file); - return __bpf_object__open(attr->file, NULL, 0, &opts); + return bpf_object_open(attr->file, NULL, 0, &opts); } struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) @@ -6862,7 +7084,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) pr_debug("loading %s\n", path); - return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts)); + return libbpf_ptr(bpf_object_open(path, NULL, 0, opts)); } struct bpf_object * @@ -6872,7 +7094,7 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, if (!obj_buf || obj_buf_sz == 0) return libbpf_err_ptr(-EINVAL); - return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts)); + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts)); } struct bpf_object * @@ -6889,7 +7111,7 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, if (!obj_buf || obj_buf_sz == 0) return errno = EINVAL, NULL; - return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts)); + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts)); } static int bpf_object_unload(struct bpf_object *obj) @@ -6920,10 +7142,6 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj) bpf_object__for_each_map(m, obj) { if (!bpf_map__is_internal(m)) continue; - if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) { - pr_warn("kernel doesn't support global data\n"); - return -ENOTSUP; - } if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) m->def.map_flags ^= BPF_F_MMAPABLE; } @@ -7246,14 +7464,10 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, return 0; } -int bpf_object__load_xattr(struct bpf_object_load_attr *attr) +static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) { - struct bpf_object *obj; int err, i; - if (!attr) - return libbpf_err(-EINVAL); - obj = attr->obj; if (!obj) return libbpf_err(-EINVAL); @@ -7263,7 +7477,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) } if (obj->gen_loader) - bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps); + bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); err = bpf_object__probe_loading(obj); err = err ? : bpf_object__load_vmlinux_btf(obj, false); @@ -7272,8 +7486,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) err = err ? : bpf_object__sanitize_maps(obj); err = err ? : bpf_object__init_kern_struct_ops_maps(obj); err = err ? : bpf_object__create_maps(obj); - err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path); - err = err ? 
: bpf_object__load_progs(obj, attr->log_level); + err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); + err = err ? : bpf_object__load_progs(obj, extra_log_level); + err = err ? : bpf_object_init_prog_arrays(obj); if (obj->gen_loader) { /* reset FDs */ @@ -7317,13 +7532,14 @@ out: return libbpf_err(err); } -int bpf_object__load(struct bpf_object *obj) +int bpf_object__load_xattr(struct bpf_object_load_attr *attr) { - struct bpf_object_load_attr attr = { - .obj = obj, - }; + return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path); +} - return bpf_object__load_xattr(&attr); +int bpf_object__load(struct bpf_object *obj) +{ + return bpf_object_load(obj, 0, NULL); } static int make_parent_dir(const char *path) @@ -7712,6 +7928,9 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) char *pin_path = NULL; char buf[PATH_MAX]; + if (map->skipped) + continue; + if (path) { int len; @@ -8296,12 +8515,46 @@ __u32 bpf_program__flags(const struct bpf_program *prog) return prog->prog_flags; } -int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags) +int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) +{ + if (prog->obj->loaded) + return libbpf_err(-EBUSY); + + prog->prog_flags = flags; + return 0; +} + +__u32 bpf_program__log_level(const struct bpf_program *prog) +{ + return prog->log_level; +} + +int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) { if (prog->obj->loaded) return libbpf_err(-EBUSY); - prog->prog_flags |= extra_flags; + prog->log_level = log_level; + return 0; +} + +const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) +{ + *log_size = prog->log_size; + return prog->log_buf; +} + +int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) +{ + if (log_size && !log_buf) + return -EINVAL; + if (prog->log_size > UINT_MAX) + return -EINVAL; + if (prog->obj->loaded) + return -EBUSY; + + prog->log_buf = log_buf; + prog->log_size = log_size; return 0; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 4ec69f224342..a8b894dae633 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -24,6 +24,10 @@ extern "C" { #endif +LIBBPF_API __u32 libbpf_major_version(void); +LIBBPF_API __u32 libbpf_minor_version(void); +LIBBPF_API const char *libbpf_version_string(void); + enum libbpf_errno { __LIBBPF_ERRNO__START = 4000, @@ -104,12 +108,73 @@ struct bpf_object_open_opts { * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux. */ const char *btf_custom_path; + /* Pointer to a buffer for storing kernel logs for applicable BPF + * commands. A valid kernel_log_size has to be specified as well and is + * passed through to the bpf() syscall. Keep in mind that the kernel might + * fail the operation with -ENOSPC if the provided buffer is too small + * to contain the entire log output. + * See the comment below for kernel_log_level for interaction between + * log_buf and log_level settings. + * + * If specified, this log buffer will be passed for: + * - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden + * with bpf_program__set_log_buf() on per-program level, to get + * BPF verifier log output. + * - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get + * BTF sanity checking log.
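As a quick orientation for how the new kernel_log_* open options fit together, here is a minimal caller-side sketch; the object file path and buffer size are hypothetical, while LIBBPF_OPTS, the option fields, and the open/load APIs are the ones this patch touches:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Sketch: route BTF and verifier logs into a caller-owned buffer. */
int open_with_kernel_log(void)
{
	static char log_buf[64 * 1024];             /* hypothetical size */
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.kernel_log_buf = log_buf,
		.kernel_log_size = sizeof(log_buf),
		.kernel_log_level = 1,              /* verbose from the first attempt */
	);
	struct bpf_object *obj;

	obj = bpf_object__open_file("prog.bpf.o", &opts); /* hypothetical path */
	if (!obj)
		return -errno;
	if (bpf_object__load(obj)) {
		/* holds the log of the last BPF_BTF_LOAD/BPF_PROG_LOAD command */
		fprintf(stderr, "load failed, kernel log:\n%s\n", log_buf);
		bpf_object__close(obj);
		return -1;
	}
	bpf_object__close(obj);
	return 0;
}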
+ * + * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite + * previous contents, so if you need more fine-grained control, set + * per-program buffer with bpf_program__set_log_buf() to preserve each + * individual program's verification log. Keep using kernel_log_buf + * for BTF verification log, if necessary. + */ + char *kernel_log_buf; + size_t kernel_log_size; + /* + * Log level can be set independently from log buffer. log_level=0 + * means that libbpf will attempt loading BTF or program without any + * logging requested, but will retry with either its own or custom log + * buffer, if provided, and log_level=1 on any error. + * Conversely, setting log_level>0 will request BTF or prog + * loading with verbose log from the first attempt (and as such also + * for successfully loaded BTF or program), and the actual log buffer + * could be either libbpf's own auto-allocated log buffer, if + * kernel_log_buf is NULL, or the user-provided custom kernel_log_buf. + * If the user didn't provide a custom log buffer, libbpf will emit captured + * logs through its print callback. + */ + __u32 kernel_log_level; + + size_t :0; }; -#define bpf_object_open_opts__last_field btf_custom_path +#define bpf_object_open_opts__last_field kernel_log_level LIBBPF_API struct bpf_object *bpf_object__open(const char *path); + +/** + * @brief **bpf_object__open_file()** creates a bpf_object by opening + * the BPF ELF object file pointed to by the passed path and loading it + * into memory. + * @param path BPF object file path + * @param opts options for how to load the bpf object; this parameter is + * optional and can be set to NULL + * @return pointer to the new bpf_object; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_object * bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts); + +/** + * @brief **bpf_object__open_mem()** creates a bpf_object by reading + * the BPF object's raw bytes from a memory buffer containing a valid + * BPF ELF object file. + * @param obj_buf pointer to the buffer containing ELF file bytes + * @param obj_buf_sz number of bytes in the buffer + * @param opts options for how to load the bpf object + * @return pointer to the new bpf_object; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts); @@ -149,6 +214,7 @@ struct bpf_object_load_attr { /* Load/unload object into/from kernel */ LIBBPF_API int bpf_object__load(struct bpf_object *obj); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead") LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr); LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead") LIBBPF_API int bpf_object__unload(struct bpf_object *obj); @@ -344,10 +410,41 @@ struct bpf_uprobe_opts { }; #define bpf_uprobe_opts__last_field retprobe +/** + * @brief **bpf_program__attach_uprobe()** attaches a BPF program + * to the userspace function which is found by binary path and + * offset. You can optionally specify a particular process to attach + * to. You can also optionally attach the program to the function + * exit instead of entry.
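A hedged usage sketch for the uprobe attach API documented above; the program pointer, binary path, and symbol offset are illustrative only (in practice func_offset would be resolved from the binary's symbol table):

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Sketch: attach `prog` (from an already-loaded bpf_object) to a
 * function entry in /bin/bash across all processes.
 */
static struct bpf_link *attach_example(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_uprobe(prog,
					  false,       /* entry, not return */
					  -1,          /* all processes */
					  "/bin/bash", /* hypothetical binary */
					  0x5670);     /* hypothetical offset */
	if (!link)
		fprintf(stderr, "uprobe attach failed: %d\n", -errno);
	return link;
}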
+ * + * @param prog BPF program to attach + * @param retprobe Attach to function exit + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary that contains the function symbol + * @param func_offset Offset within the binary of the function symbol + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_link * bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe, pid_t pid, const char *binary_path, size_t func_offset); + +/** + * @brief **bpf_program__attach_uprobe_opts()** is just like + * bpf_program__attach_uprobe() except with an options struct + * for various configurations. + * + * @param prog BPF program to attach + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary that contains the function symbol + * @param func_offset Offset within the binary of the function symbol + * @param opts Options for altering program attachment + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + */ LIBBPF_API struct bpf_link * bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t func_offset, @@ -494,7 +591,16 @@ bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type); LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog); -LIBBPF_API int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags); +LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags); + +/* Per-program log level and log buffer getters/setters. + * See bpf_object_open_opts comments regarding log_level and log_buf + * interactions.
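The per-program setters round out the object-level options: giving a program its own buffer keeps its verifier log from being overwritten by subsequent program loads. A minimal sketch, assuming a hypothetical program name and buffer size:

#include <errno.h>
#include <bpf/libbpf.h>

static char prog_log[256 * 1024]; /* hypothetical size */

/* Sketch: before bpf_object__load(), give one program a dedicated
 * log buffer and a verbose verifier log level.
 */
static int setup_prog_log(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "handle_tp"); /* hypothetical name */
	if (!prog)
		return -ESRCH;
	if (bpf_program__set_log_buf(prog, prog_log, sizeof(prog_log)))
		return -EINVAL;
	return bpf_program__set_log_level(prog, 2); /* 2 == verbose verifier log */
}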
+ */ +LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level); +LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size); +LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size); LIBBPF_API int bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, @@ -676,6 +782,7 @@ struct bpf_prog_load_attr { int prog_flags; }; +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open() and bpf_object__load() instead") LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_object **pobj, int *prog_fd); LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead") @@ -1031,11 +1138,11 @@ struct bpf_object_skeleton { struct bpf_object **obj; int map_cnt; - int map_skel_sz; /* sizeof(struct bpf_skeleton_map) */ + int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ struct bpf_map_skeleton *maps; int prog_cnt; - int prog_skel_sz; /* sizeof(struct bpf_skeleton_prog) */ + int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ struct bpf_prog_skeleton *progs; }; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6a59514a48cf..4d483af7dba6 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -391,6 +391,7 @@ LIBBPF_0.6.0 { global: bpf_map__map_extra; bpf_map__set_map_extra; + bpf_map_create; bpf_object__next_map; bpf_object__next_program; bpf_object__prev_map; @@ -400,7 +401,7 @@ LIBBPF_0.6.0 { bpf_program__flags; bpf_program__insn_cnt; bpf_program__insns; - bpf_program__set_extra_flags; + bpf_program__set_flags; btf__add_btf; btf__add_decl_tag; btf__add_type_tag; @@ -410,8 +411,20 @@ LIBBPF_0.6.0 { btf__type_cnt; btf_dump__new; btf_dump__new_deprecated; + libbpf_major_version; + libbpf_minor_version; + libbpf_version_string; perf_buffer__new; perf_buffer__new_deprecated; perf_buffer__new_raw; perf_buffer__new_raw_deprecated; } LIBBPF_0.5.0; + +LIBBPF_0.7.0 { + global: + bpf_btf_load; + bpf_program__log_buf; + bpf_program__log_level; + bpf_program__set_log_buf; + bpf_program__set_log_level; +}; diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h index b21cefc9c3b6..000e37798ff2 100644 --- a/tools/lib/bpf/libbpf_common.h +++ b/tools/lib/bpf/libbpf_common.h @@ -40,6 +40,11 @@ #else #define __LIBBPF_MARK_DEPRECATED_0_7(X) #endif +#if __LIBBPF_CURRENT_VERSION_GEQ(0, 8) +#define __LIBBPF_MARK_DEPRECATED_0_8(X) X +#else +#define __LIBBPF_MARK_DEPRECATED_0_8(X) +#endif /* This set of internal macros allows to do "function overloading" based on * number of arguments provided by used in backwards-compatible way during the diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index f7ac349650a1..355c41019aed 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -172,7 +172,7 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) struct btf; struct btf_type; -struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id); +struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id); const char *btf_kind_str(const struct btf_type *t); const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); @@ -277,27 +277,7 @@ int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz); int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz); int libbpf__load_raw_btf(const 
char *raw_types, size_t types_len, const char *str_sec, size_t str_len); - -struct bpf_create_map_params { - const char *name; - enum bpf_map_type map_type; - __u32 map_flags; - __u32 key_size; - __u32 value_size; - __u32 max_entries; - __u32 numa_node; - __u32 btf_fd; - __u32 btf_key_type_id; - __u32 btf_value_type_id; - __u32 map_ifindex; - union { - __u32 inner_map_fd; - __u32 btf_vmlinux_value_type_id; - }; - __u64 map_extra; -}; - -int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr); +int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level); struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 02c401e314c7..4bdec69523a7 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -164,7 +164,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len, memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len); memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len); - btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false); + btf_fd = bpf_btf_load(raw_btf, btf_len, NULL); free(raw_btf); return btf_fd; @@ -201,7 +201,6 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) { int key_size, value_size, max_entries, map_flags; __u32 btf_key_type_id = 0, btf_value_type_id = 0; - struct bpf_create_map_attr attr = {}; int fd = -1, btf_fd = -1, fd_inner; key_size = sizeof(__u32); @@ -271,34 +270,35 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { + LIBBPF_OPTS(bpf_map_create_opts, opts); + /* TODO: probe for device, once libbpf has a function to create * map-in-map for offload */ if (ifindex) return false; - fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH, - sizeof(__u32), sizeof(__u32), 1, 0); + fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, + sizeof(__u32), sizeof(__u32), 1, NULL); if (fd_inner < 0) return false; - fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32), - fd_inner, 1, 0); + + opts.inner_map_fd = fd_inner; + fd = bpf_map_create(map_type, NULL, sizeof(__u32), sizeof(__u32), 1, &opts); close(fd_inner); } else { + LIBBPF_OPTS(bpf_map_create_opts, opts); + /* Note: No other restriction on map type probes for offload */ - attr.map_type = map_type; - attr.key_size = key_size; - attr.value_size = value_size; - attr.max_entries = max_entries; - attr.map_flags = map_flags; - attr.map_ifindex = ifindex; + opts.map_flags = map_flags; + opts.map_ifindex = ifindex; if (btf_fd >= 0) { - attr.btf_fd = btf_fd; - attr.btf_key_type_id = btf_key_type_id; - attr.btf_value_type_id = btf_value_type_id; + opts.btf_fd = btf_fd; + opts.btf_key_type_id = btf_key_type_id; + opts.btf_value_type_id = btf_value_type_id; } - fd = bpf_create_map_xattr(&attr); + fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts); } if (fd >= 0) close(fd); diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h index dd56d76f291c..0fefefc3500b 100644 --- a/tools/lib/bpf/libbpf_version.h +++ b/tools/lib/bpf/libbpf_version.h @@ -4,6 +4,6 @@ #define __LIBBPF_VERSION_H #define LIBBPF_MAJOR_VERSION 0 -#define LIBBPF_MINOR_VERSION 6 +#define LIBBPF_MINOR_VERSION 7 #endif /* __LIBBPF_VERSION_H */ diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 594b206fa674..9aa016fb55aa 100644 --- 
a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -210,6 +210,7 @@ void bpf_linker__free(struct bpf_linker *linker) } free(linker->secs); + free(linker->glob_syms); free(linker); } @@ -1999,7 +2000,7 @@ add_sym: static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj) { struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx]; - struct dst_sec *dst_symtab = &linker->secs[linker->symtab_sec_idx]; + struct dst_sec *dst_symtab; int i, err; for (i = 1; i < obj->sec_cnt; i++) { @@ -2032,6 +2033,9 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob return -1; } + /* add_dst_sec() above could have invalidated linker->secs */ + dst_symtab = &linker->secs[linker->symtab_sec_idx]; + /* shdr->sh_link points to SYMTAB */ dst_sec->shdr->sh_link = linker->symtab_sec_idx; diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index b5b8956a1be8..32464f0ab4b1 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -1,6 +1,60 @@ // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2019 Facebook */ +#ifdef __KERNEL__ +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/string.h> +#include <linux/bpf_verifier.h> +#include "relo_core.h" + +static const char *btf_kind_str(const struct btf_type *t) +{ + return btf_type_str(t); +} + +static bool is_ldimm64_insn(struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW); +} + +static const struct btf_type * +skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id) +{ + return btf_type_skip_modifiers(btf, id, res_id); +} + +static const char *btf__name_by_offset(const struct btf *btf, u32 offset) +{ + return btf_name_by_offset(btf, offset); +} + +static s64 btf__resolve_size(const struct btf *btf, u32 type_id) +{ + const struct btf_type *t; + int size; + + t = btf_type_by_id(btf, type_id); + t = btf_resolve_size(btf, t, &size); + if (IS_ERR(t)) + return PTR_ERR(t); + return size; +} + +enum libbpf_print_level { + LIBBPF_WARN, + LIBBPF_INFO, + LIBBPF_DEBUG, +}; + +#undef pr_warn +#undef pr_info +#undef pr_debug +#define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define libbpf_print(level, fmt, ...) 
bpf_log((void *)prog_name, fmt, ##__VA_ARGS__) +#else #include <stdio.h> #include <string.h> #include <errno.h> @@ -12,33 +66,7 @@ #include "btf.h" #include "str_error.h" #include "libbpf_internal.h" - -#define BPF_CORE_SPEC_MAX_LEN 64 - -/* represents BPF CO-RE field or array element accessor */ -struct bpf_core_accessor { - __u32 type_id; /* struct/union type or array element type */ - __u32 idx; /* field index or array index */ - const char *name; /* field name or NULL for array accessor */ -}; - -struct bpf_core_spec { - const struct btf *btf; - /* high-level spec: named fields and array indices only */ - struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; - /* original unresolved (no skip_mods_or_typedefs) root type ID */ - __u32 root_type_id; - /* CO-RE relocation kind */ - enum bpf_core_relo_kind relo_kind; - /* high-level spec length */ - int len; - /* raw, low-level spec: 1-to-1 with accessor spec string */ - int raw_spec[BPF_CORE_SPEC_MAX_LEN]; - /* raw spec length */ - int raw_len; - /* field bit offset represented by spec */ - __u32 bit_offset; -}; +#endif static bool is_flex_arr(const struct btf *btf, const struct bpf_core_accessor *acc, @@ -51,25 +79,25 @@ static bool is_flex_arr(const struct btf *btf, return false; /* has to be the last member of enclosing struct */ - t = btf__type_by_id(btf, acc->type_id); + t = btf_type_by_id(btf, acc->type_id); return acc->idx == btf_vlen(t) - 1; } static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_FIELD_BYTE_OFFSET: return "byte_off"; - case BPF_FIELD_BYTE_SIZE: return "byte_sz"; - case BPF_FIELD_EXISTS: return "field_exists"; - case BPF_FIELD_SIGNED: return "signed"; - case BPF_FIELD_LSHIFT_U64: return "lshift_u64"; - case BPF_FIELD_RSHIFT_U64: return "rshift_u64"; - case BPF_TYPE_ID_LOCAL: return "local_type_id"; - case BPF_TYPE_ID_TARGET: return "target_type_id"; - case BPF_TYPE_EXISTS: return "type_exists"; - case BPF_TYPE_SIZE: return "type_size"; - case BPF_ENUMVAL_EXISTS: return "enumval_exists"; - case BPF_ENUMVAL_VALUE: return "enumval_value"; + case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off"; + case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz"; + case BPF_CORE_FIELD_EXISTS: return "field_exists"; + case BPF_CORE_FIELD_SIGNED: return "signed"; + case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64"; + case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64"; + case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id"; + case BPF_CORE_TYPE_ID_TARGET: return "target_type_id"; + case BPF_CORE_TYPE_EXISTS: return "type_exists"; + case BPF_CORE_TYPE_SIZE: return "type_size"; + case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists"; + case BPF_CORE_ENUMVAL_VALUE: return "enumval_value"; default: return "unknown"; } } @@ -77,12 +105,12 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_FIELD_BYTE_OFFSET: - case BPF_FIELD_BYTE_SIZE: - case BPF_FIELD_EXISTS: - case BPF_FIELD_SIGNED: - case BPF_FIELD_LSHIFT_U64: - case BPF_FIELD_RSHIFT_U64: + case BPF_CORE_FIELD_BYTE_OFFSET: + case BPF_CORE_FIELD_BYTE_SIZE: + case BPF_CORE_FIELD_EXISTS: + case BPF_CORE_FIELD_SIGNED: + case BPF_CORE_FIELD_LSHIFT_U64: + case BPF_CORE_FIELD_RSHIFT_U64: return true; default: return false; @@ -92,10 +120,10 @@ static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_TYPE_ID_LOCAL: - case BPF_TYPE_ID_TARGET: - 
case BPF_TYPE_EXISTS: - case BPF_TYPE_SIZE: + case BPF_CORE_TYPE_ID_LOCAL: + case BPF_CORE_TYPE_ID_TARGET: + case BPF_CORE_TYPE_EXISTS: + case BPF_CORE_TYPE_SIZE: return true; default: return false; @@ -105,8 +133,8 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) { switch (kind) { - case BPF_ENUMVAL_EXISTS: - case BPF_ENUMVAL_VALUE: + case BPF_CORE_ENUMVAL_EXISTS: + case BPF_CORE_ENUMVAL_VALUE: return true; default: return false; @@ -150,7 +178,7 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access * string to specify enumerator's value index that need to be relocated. */ -static int bpf_core_parse_spec(const struct btf *btf, +static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf, __u32 type_id, const char *spec_str, enum bpf_core_relo_kind relo_kind, @@ -272,8 +300,8 @@ static int bpf_core_parse_spec(const struct btf *btf, return sz; spec->bit_offset += access_idx * sz * 8; } else { - pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", - type_id, spec_str, i, id, btf_kind_str(t)); + pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", + prog_name, type_id, spec_str, i, id, btf_kind_str(t)); return -EINVAL; } } @@ -346,8 +374,6 @@ recur: targ_id = btf_array(targ_type)->type; goto recur; default: - pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n", - btf_kind(local_type), local_id, targ_id); return 0; } } @@ -388,7 +414,7 @@ static int bpf_core_match_member(const struct btf *local_btf, return 0; local_id = local_acc->type_id; - local_type = btf__type_by_id(local_btf, local_id); + local_type = btf_type_by_id(local_btf, local_id); local_member = btf_members(local_type) + local_acc->idx; local_name = btf__name_by_offset(local_btf, local_member->name_off); @@ -571,7 +597,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, *field_sz = 0; - if (relo->kind == BPF_FIELD_EXISTS) { + if (relo->kind == BPF_CORE_FIELD_EXISTS) { *val = spec ? 
1 : 0; return 0; } @@ -580,11 +606,11 @@ static int bpf_core_calc_field_relo(const char *prog_name, return -EUCLEAN; /* request instruction poisoning */ acc = &spec->spec[spec->len - 1]; - t = btf__type_by_id(spec->btf, acc->type_id); + t = btf_type_by_id(spec->btf, acc->type_id); /* a[n] accessor needs special handling */ if (!acc->name) { - if (relo->kind == BPF_FIELD_BYTE_OFFSET) { + if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) { *val = spec->bit_offset / 8; /* remember field size for load/store mem size */ sz = btf__resolve_size(spec->btf, acc->type_id); @@ -592,7 +618,7 @@ static int bpf_core_calc_field_relo(const char *prog_name, return -EINVAL; *field_sz = sz; *type_id = acc->type_id; - } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { + } else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) { sz = btf__resolve_size(spec->btf, acc->type_id); if (sz < 0) return -EINVAL; @@ -644,36 +670,36 @@ static int bpf_core_calc_field_relo(const char *prog_name, *validate = !bitfield; switch (relo->kind) { - case BPF_FIELD_BYTE_OFFSET: + case BPF_CORE_FIELD_BYTE_OFFSET: *val = byte_off; if (!bitfield) { *field_sz = byte_sz; *type_id = field_type_id; } break; - case BPF_FIELD_BYTE_SIZE: + case BPF_CORE_FIELD_BYTE_SIZE: *val = byte_sz; break; - case BPF_FIELD_SIGNED: + case BPF_CORE_FIELD_SIGNED: /* enums will be assumed unsigned */ *val = btf_is_enum(mt) || (btf_int_encoding(mt) & BTF_INT_SIGNED); if (validate) *validate = true; /* signedness is never ambiguous */ break; - case BPF_FIELD_LSHIFT_U64: + case BPF_CORE_FIELD_LSHIFT_U64: #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ *val = 64 - (bit_off + bit_sz - byte_off * 8); #else *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); #endif break; - case BPF_FIELD_RSHIFT_U64: + case BPF_CORE_FIELD_RSHIFT_U64: *val = 64 - bit_sz; if (validate) *validate = true; /* right shift is never ambiguous */ break; - case BPF_FIELD_EXISTS: + case BPF_CORE_FIELD_EXISTS: default: return -EOPNOTSUPP; } @@ -694,20 +720,20 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, } switch (relo->kind) { - case BPF_TYPE_ID_TARGET: + case BPF_CORE_TYPE_ID_TARGET: *val = spec->root_type_id; break; - case BPF_TYPE_EXISTS: + case BPF_CORE_TYPE_EXISTS: *val = 1; break; - case BPF_TYPE_SIZE: + case BPF_CORE_TYPE_SIZE: sz = btf__resolve_size(spec->btf, spec->root_type_id); if (sz < 0) return -EINVAL; *val = sz; break; - case BPF_TYPE_ID_LOCAL: - /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */ + case BPF_CORE_TYPE_ID_LOCAL: + /* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */ default: return -EOPNOTSUPP; } @@ -723,13 +749,13 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, const struct btf_enum *e; switch (relo->kind) { - case BPF_ENUMVAL_EXISTS: + case BPF_CORE_ENUMVAL_EXISTS: *val = spec ? 
1 : 0; break; - case BPF_ENUMVAL_VALUE: + case BPF_CORE_ENUMVAL_VALUE: if (!spec) return -EUCLEAN; /* request instruction poisoning */ - t = btf__type_by_id(spec->btf, spec->spec[0].type_id); + t = btf_type_by_id(spec->btf, spec->spec[0].type_id); e = btf_enum(t) + spec->spec[0].idx; *val = e->val; break; @@ -805,8 +831,8 @@ static int bpf_core_calc_relo(const char *prog_name, if (res->orig_sz != res->new_sz) { const struct btf_type *orig_t, *new_t; - orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); - new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); + orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id); + new_t = btf_type_by_id(targ_spec->btf, res->new_type_id); /* There are two use cases in which it's safe to * adjust load/store's mem size: @@ -1045,7 +1071,7 @@ poison: * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>, * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b */ -static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) +static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec) { const struct btf_type *t; const struct btf_enum *e; @@ -1054,7 +1080,7 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) int i; type_id = spec->root_type_id; - t = btf__type_by_id(spec->btf, type_id); + t = btf_type_by_id(spec->btf, type_id); s = btf__name_by_offset(spec->btf, t->name_off); libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s); @@ -1147,9 +1173,12 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, - struct bpf_core_cand_list *cands) + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch) { - struct bpf_core_spec local_spec, cand_spec, targ_spec = {}; + struct bpf_core_spec *local_spec = &specs_scratch[0]; + struct bpf_core_spec *cand_spec = &specs_scratch[1]; + struct bpf_core_spec *targ_spec = &specs_scratch[2]; struct bpf_core_relo_res cand_res, targ_res; const struct btf_type *local_type; const char *local_name; @@ -1158,10 +1187,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, int i, j, err; local_id = relo->type_id; - local_type = btf__type_by_id(local_btf, local_id); - if (!local_type) - return -EINVAL; - + local_type = btf_type_by_id(local_btf, local_id); local_name = btf__name_by_offset(local_btf, local_type->name_off); if (!local_name) return -EINVAL; @@ -1170,7 +1196,8 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, if (str_is_empty(spec_str)) return -EINVAL; - err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); + err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str, + relo->kind, local_spec); if (err) { pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", prog_name, relo_idx, local_id, btf_kind_str(local_type), @@ -1181,15 +1208,15 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); - bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec); + bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec); libbpf_print(LIBBPF_DEBUG, "\n"); /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ - if (relo->kind == BPF_TYPE_ID_LOCAL) { + if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) { targ_res.validate = 
true; targ_res.poison = false; - targ_res.orig_val = local_spec.root_type_id; - targ_res.new_val = local_spec.root_type_id; + targ_res.orig_val = local_spec->root_type_id; + targ_res.new_val = local_spec->root_type_id; goto patch_insn; } @@ -1202,38 +1229,38 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, for (i = 0, j = 0; i < cands->len; i++) { - err = bpf_core_spec_match(&local_spec, cands->cands[i].btf, - cands->cands[i].id, &cand_spec); + err = bpf_core_spec_match(local_spec, cands->cands[i].btf, + cands->cands[i].id, cand_spec); if (err < 0) { pr_warn("prog '%s': relo #%d: error matching candidate #%d ", prog_name, relo_idx, i); - bpf_core_dump_spec(LIBBPF_WARN, &cand_spec); + bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec); libbpf_print(LIBBPF_WARN, ": %d\n", err); return err; } pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name, relo_idx, err == 0 ? "non-matching" : "matching", i); - bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec); + bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec); libbpf_print(LIBBPF_DEBUG, "\n"); if (err == 0) continue; - err = bpf_core_calc_relo(prog_name, relo, relo_idx, &local_spec, &cand_spec, &cand_res); + err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res); if (err) return err; if (j == 0) { targ_res = cand_res; - targ_spec = cand_spec; - } else if (cand_spec.bit_offset != targ_spec.bit_offset) { + *targ_spec = *cand_spec; + } else if (cand_spec->bit_offset != targ_spec->bit_offset) { /* if there are many field relo candidates, they * should all resolve to the same bit offset */ pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n", - prog_name, relo_idx, cand_spec.bit_offset, - targ_spec.bit_offset); + prog_name, relo_idx, cand_spec->bit_offset, + targ_spec->bit_offset); return -EINVAL; } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) { /* all candidates should result in the same relocation @@ -1251,7 +1278,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, } /* - * For BPF_FIELD_EXISTS relo or when used BPF program has field + * For BPF_CORE_FIELD_EXISTS relo or when used BPF program has field * existence checks or kernel version/config checks, it's expected * that we might not find any candidates. In this case, if field * wasn't found in any candidate, the list of candidates shouldn't @@ -1277,7 +1304,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, prog_name, relo_idx); /* calculate single target relo result explicitly */ - err = bpf_core_calc_relo(prog_name, relo, relo_idx, &local_spec, NULL, &targ_res); + err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, &targ_res); if (err) return err; } diff --git a/tools/lib/bpf/relo_core.h b/tools/lib/bpf/relo_core.h index 3b9f8f18346c..17799819ad7c 100644 --- a/tools/lib/bpf/relo_core.h +++ b/tools/lib/bpf/relo_core.h @@ -4,81 +4,10 @@ #ifndef __RELO_CORE_H #define __RELO_CORE_H -/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value - * has to be adjusted by relocations. 
- */ -enum bpf_core_relo_kind { - BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ - BPF_FIELD_BYTE_SIZE = 1, /* field size in bytes */ - BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ - BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */ - BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */ - BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */ - BPF_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */ - BPF_TYPE_ID_TARGET = 7, /* type ID in target kernel */ - BPF_TYPE_EXISTS = 8, /* type existence in target kernel */ - BPF_TYPE_SIZE = 9, /* type size in bytes */ - BPF_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ - BPF_ENUMVAL_VALUE = 11, /* enum value integer value */ -}; - -/* The minimum bpf_core_relo checked by the loader - * - * CO-RE relocation captures the following data: - * - insn_off - instruction offset (in bytes) within a BPF program that needs - * its insn->imm field to be relocated with actual field info; - * - type_id - BTF type ID of the "root" (containing) entity of a relocatable - * type or field; - * - access_str_off - offset into corresponding .BTF string section. String - * interpretation depends on specific relocation kind: - * - for field-based relocations, string encodes an accessed field using - * a sequence of field and array indices, separated by colon (:). It's - * conceptually very close to LLVM's getelementptr ([0]) instruction's - * arguments for identifying offset to a field. - * - for type-based relocations, strings is expected to be just "0"; - * - for enum value-based relocations, string contains an index of enum - * value within its enum type; - * - * Example to provide a better feel. - * - * struct sample { - * int a; - * struct { - * int b[10]; - * }; - * }; - * - * struct sample *s = ...; - * int x = &s->a; // encoded as "0:0" (a is field #0) - * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, - * // b is field #0 inside anon struct, accessing elem #5) - * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) - * - * type_id for all relocs in this example will capture BTF type id of - * `struct sample`. - * - * Such relocation is emitted when using __builtin_preserve_access_index() - * Clang built-in, passing expression that captures field address, e.g.: - * - * bpf_probe_read(&dst, sizeof(dst), - * __builtin_preserve_access_index(&src->a.b.c)); - * - * In this case Clang will emit field relocation recording necessary data to - * be able to find offset of embedded `a.b.c` field within `src` struct. 
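For readers following the access-string encoding described in the comment being removed here (struct bpf_core_relo now comes in via the <linux/bpf.h> include added below), a hedged BPF-side sketch of what generates such relocations; the struct mirrors the `struct sample` example, while the section name and attach point are made up:

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* Mirrors the example from the comment; preserve_access_index makes
 * field accesses relocatable, and BPF_CORE_READ() wraps its reads in
 * __builtin_preserve_access_index() internally.
 */
struct sample {
	int a;
	struct {
		int b[10];
	};
} __attribute__((preserve_access_index));

SEC("tp/sched/sched_switch") /* hypothetical attach point */
int handle(void *ctx)
{
	struct sample *s = ctx; /* illustration only */
	int x, y;

	x = BPF_CORE_READ(s, a);    /* recorded as access string "0:0" */
	y = BPF_CORE_READ(s, b[5]); /* recorded as "0:1:0:5" */
	return x + y;
}

char LICENSE[] SEC("license") = "GPL";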
- * - * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction - */ -struct bpf_core_relo { - __u32 insn_off; - __u32 type_id; - __u32 access_str_off; - enum bpf_core_relo_kind kind; -}; +#include <linux/bpf.h> struct bpf_core_cand { const struct btf *btf; - const struct btf_type *t; - const char *name; __u32 id; }; @@ -88,11 +17,39 @@ struct bpf_core_cand_list { int len; }; +#define BPF_CORE_SPEC_MAX_LEN 64 + +/* represents BPF CO-RE field or array element accessor */ +struct bpf_core_accessor { + __u32 type_id; /* struct/union type or array element type */ + __u32 idx; /* field index or array index */ + const char *name; /* field name or NULL for array accessor */ +}; + +struct bpf_core_spec { + const struct btf *btf; + /* high-level spec: named fields and array indices only */ + struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; + /* original unresolved (no skip_mods_or_typedefs) root type ID */ + __u32 root_type_id; + /* CO-RE relocation kind */ + enum bpf_core_relo_kind relo_kind; + /* high-level spec length */ + int len; + /* raw, low-level spec: 1-to-1 with accessor spec string */ + int raw_spec[BPF_CORE_SPEC_MAX_LEN]; + /* raw spec length */ + int raw_len; + /* field bit offset represented by spec */ + __u32 bit_offset; +}; + int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, int insn_idx, const struct bpf_core_relo *relo, int relo_idx, const struct btf *local_btf, - struct bpf_core_cand_list *cands); + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch); int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id); diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index 9cf66702fa8d..0b84d8e6b72a 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -7,6 +7,16 @@ #include <sys/syscall.h> #include <sys/mman.h> +#ifndef __NR_bpf +# if defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define __NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 +# endif +#endif + /* This file is a base header for auto-generated *.lskel.h files. * Its contents will change and may become part of auto-generation in the future. 
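The MIPS __NR_bpf definitions added above matter because this header issues the bpf(2) syscall directly rather than linking against libbpf's wrappers, and on MIPS the libc headers may not define __NR_bpf (the o32/n32/n64 ABIs each use a different number). A sketch of the direct-call pattern; the wrapper name is illustrative, not taken from the patch:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Illustrative: with __NR_bpf guaranteed by the #ifdef block above,
 * a raw bpf() call needs nothing beyond syscall(2).
 */
static inline long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			   unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}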
* @@ -65,8 +75,7 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) int map_fd = -1, prog_fd = -1, key = 0, err; union bpf_attr attr; - map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, - opts->data_sz, 1, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1, NULL); if (map_fd < 0) { opts->errstr = "failed to create loader map"; err = -errno; diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index fdb22f5405c9..e8d94c6dd3bc 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -35,6 +35,11 @@ #include "libbpf_internal.h" #include "xsk.h" +/* entire xsk.h and xsk.c are going away in libbpf 1.0, so ignore all internal + * uses of deprecated APIs + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #ifndef SOL_XDP #define SOL_XDP 283 #endif @@ -364,7 +369,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, static enum xsk_prog get_xsk_prog(void) { enum xsk_prog detected = XSK_PROG_FALLBACK; - struct bpf_create_map_attr map_attr; __u32 size_out, retval, duration; char data_in = 0, data_out; struct bpf_insn insns[] = { @@ -376,13 +380,7 @@ static enum xsk_prog get_xsk_prog(void) }; int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns); - memset(&map_attr, 0, sizeof(map_attr)); - map_attr.map_type = BPF_MAP_TYPE_XSKMAP; - map_attr.key_size = sizeof(int); - map_attr.value_size = sizeof(int); - map_attr.max_entries = 1; - - map_fd = bpf_create_map_xattr(&map_attr); + map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL); if (map_fd < 0) return detected; @@ -586,8 +584,8 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk) if (max_queues < 0) return max_queues; - fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map", - sizeof(int), sizeof(int), max_queues, 0); + fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map", + sizeof(int), sizeof(int), max_queues, NULL); if (fd < 0) return fd; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 81a4c543ff7e..4b384c907027 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf) return -1; } memset(sym, 0, sizeof(*sym)); + INIT_LIST_HEAD(&sym->pv_target); sym->alias = sym; sym->idx = i; diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c index c90c7084e45a..bdf699f6552b 100644 --- a/tools/objtool/objtool.c +++ b/tools/objtool/objtool.c @@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func) !strcmp(func->name, "_paravirt_ident_64")) return; + /* already added this function */ + if (!list_empty(&func->pv_target)) + return; + list_add(&func->pv_target, &f->pv_ops[idx].targets); f->pv_ops[idx].clean = false; } diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index afd144725a0b..3df74cf5651a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -271,8 +271,6 @@ endif FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS) -FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS) -FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS) FEATURE_CHECK_LDFLAGS-libaio = -lrt diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 7bef917cc84e..15109af9d075 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -528,3 +528,4 @@ 446
common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index df5261e5cfe1..ed9c5c2eafad 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -451,3 +451,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv sys_futex_waitv diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c index fa0ff4ce2b74..488f6e6ba1a5 100644 --- a/tools/perf/bench/sched-messaging.c +++ b/tools/perf/bench/sched-messaging.c @@ -223,8 +223,6 @@ static unsigned int group(pthread_t *pth, snd_ctx->out_fds[i] = fds[1]; if (!thread_mode) close(fds[0]); - - free(ctx); } /* Now we have all the fds, fork the senders */ @@ -241,8 +239,6 @@ static unsigned int group(pthread_t *pth, for (i = 0; i < num_fds; i++) close(snd_ctx->out_fds[i]); - free(snd_ctx); - /* Return number of children to reap */ return num_fds * 2; } diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index bc5259db5fd9..b9d6306cc14e 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -820,7 +820,7 @@ static int __cmd_inject(struct perf_inject *inject) inject->tool.ordered_events = true; inject->tool.ordering_requires_timestamps = true; /* Allow space in the header for new attributes */ - output_data_offset = 4096; + output_data_offset = roundup(8192 + session->header.data_offset, 4096); if (inject->strip) strip_init(inject); } diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c index 329f77f592f4..573490530194 100644 --- a/tools/perf/tests/bpf.c +++ b/tools/perf/tests/bpf.c @@ -296,9 +296,13 @@ static int check_env(void) return err; } +/* temporarily disable libbpf deprecation warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns, ARRAY_SIZE(insns), license, kver_int, NULL, 0); +#pragma GCC diagnostic pop if (err < 0) { pr_err("Missing basic BPF support, skip this test: %s\n", strerror(errno)); diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c index c895de481fe1..d54c5371c6a6 100644 --- a/tools/perf/tests/expr.c +++ b/tools/perf/tests/expr.c @@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0); TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies); TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0); - TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); + + if (num_dies) // Some platforms do not have CPU die support, for example s390 + TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); /* * Source count returns the number of events aggregating in a leader diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c index 574b7e4efd3a..07b6f4ec024f 100644 --- a/tools/perf/tests/parse-metric.c +++ b/tools/perf/tests/parse-metric.c @@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist, struct evsel *evsel; u64 count; + 
perf_stat__reset_shadow_stats(); evlist__for_each_entry(evlist, evsel) { count = find_value(evsel->name, vals); perf_stat__update_shadow_stats(evsel, count, 0, st); diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index fbb3c4057c30..528aeb0ab79d 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -29,6 +29,9 @@ #include <internal/xyarray.h> +/* temporarily disable libbpf deprecation warnings */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)), const char *fmt, va_list args) { diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index c17d4a43ce06..5a97fd7d0a71 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -307,6 +307,20 @@ static bool bperf_attr_map_compatible(int attr_map_fd) (map_info.value_size == sizeof(struct perf_event_attr_map_entry)); } +int __weak +bpf_map_create(enum bpf_map_type map_type, + const char *map_name __maybe_unused, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts __maybe_unused) +{ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + return bpf_create_map(map_type, key_size, value_size, max_entries, 0); +#pragma GCC diagnostic pop +} + static int bperf_lock_attr_map(struct target *target) { char path[PATH_MAX]; @@ -320,10 +334,10 @@ static int bperf_lock_attr_map(struct target *target) } if (access(path, F_OK)) { - map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, + map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(struct perf_event_attr), sizeof(struct perf_event_attr_map_entry), - ATTR_MAP_SIZE, 0); + ATTR_MAP_SIZE, NULL); if (map_fd < 0) return -1; diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h deleted file mode 100644 index 186a5551ddb9..000000000000 --- a/tools/perf/util/bpf_skel/bperf.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -// Copyright (c) 2021 Facebook - -#ifndef __BPERF_STAT_H -#define __BPERF_STAT_H - -typedef struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(struct bpf_perf_event_value)); - __uint(max_entries, 1); -} reading_map; - -#endif /* __BPERF_STAT_H */ diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c index b8fa3cb2da23..f193998530d4 100644 --- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c +++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c @@ -1,14 +1,23 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" #include "bperf_u.h" -reading_map diff_readings SEC(".maps"); -reading_map accum_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} accum_readings SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c index 
4f70d1459e86..e2a2d4cd7779 100644 --- a/tools/perf/util/bpf_skel/bperf_leader.bpf.c +++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c @@ -1,10 +1,8 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); @@ -13,8 +11,19 @@ struct { __uint(map_flags, BPF_F_PRESERVE_ELEMS); } events SEC(".maps"); -reading_map prev_readings SEC(".maps"); -reading_map diff_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} prev_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); SEC("raw_tp/sched_switch") int BPF_PROG(on_switch) diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c index ab12b4c4ece2..97037d3b3d9f 100644 --- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c +++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2020 Facebook -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 79cce216727e..e3c1a532d059 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2321,6 +2321,7 @@ out: #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ {\ + free(ff->ph->env.__feat_env); \ ff->ph->env.__feat_env = do_read_string(ff); \ return ff->ph->env.__feat_env ? 
0 : -ENOMEM; \ } @@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session, struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event; int type = fe->header.type; u64 feat = fe->feat_id; + int ret = 0; if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { pr_warning("invalid record type %d in pipe-mode\n", type); @@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session, ff.size = event->header.size - sizeof(*fe); ff.ph = &session->header; - if (feat_ops[feat].process(&ff, NULL)) - return -1; + if (feat_ops[feat].process(&ff, NULL)) { + ret = -1; + goto out; + } if (!feat_ops[feat].print || !tool->show_feat_hdr) - return 0; + goto out; if (!feat_ops[feat].full_only || tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { @@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session, fprintf(stdout, "# %s info available, use -I to display\n", feat_ops[feat].name); } - - return 0; +out: + free_event_desc(ff.events); + return ret; } size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c index 20bacd5972ad..34f1b1b1176c 100644 --- a/tools/perf/util/smt.c +++ b/tools/perf/util/smt.c @@ -15,7 +15,7 @@ int smt_on(void) if (cached) return cached_result; - if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0) + if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0) goto done; ncpu = sysconf(_SC_NPROCESSORS_CONF); diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h index 565fccdfe6e9..016cff473cfc 100644 --- a/tools/testing/radix-tree/linux/lockdep.h +++ b/tools/testing/radix-tree/linux/lockdep.h @@ -1,5 +1,8 @@ #ifndef _LINUX_LOCKDEP_H #define _LINUX_LOCKDEP_H + +#include <linux/spinlock.h> + struct lock_class_key { unsigned int a; }; diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 5d42db2e129a..a795bca4c8ec 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -192,22 +192,26 @@ TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL) $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ) -$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sock: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sock_addr: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sockmap: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c testing_helpers.o -$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sock_fields: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_sysctl: cgroup_helpers.c testing_helpers.o -$(OUTPUT)/test_tag: testing_helpers.o -$(OUTPUT)/test_lirc_mode2_user: testing_helpers.o -$(OUTPUT)/xdping: testing_helpers.o -$(OUTPUT)/flow_dissector_load: testing_helpers.o -$(OUTPUT)/test_maps: testing_helpers.o -$(OUTPUT)/test_verifier: testing_helpers.o +CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o +TESTING_HELPERS := $(OUTPUT)/testing_helpers.o +TRACE_HELPERS := $(OUTPUT)/trace_helpers.o + +$(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sock_addr: $(CGROUP_HELPERS) 
$(TESTING_HELPERS) +$(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS) +$(OUTPUT)/get_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS) +$(OUTPUT)/test_tag: $(TESTING_HELPERS) +$(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS) +$(OUTPUT)/xdping: $(TESTING_HELPERS) +$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS) +$(OUTPUT)/test_maps: $(TESTING_HELPERS) +$(OUTPUT)/test_verifier: $(TESTING_HELPERS) BPFTOOL ?= $(DEFAULT_BPFTOOL) $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ @@ -325,9 +329,10 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \ linked_vars.skel.h linked_maps.skel.h LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ - test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c + test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \ + map_ptr_kern.c core_kern.c # Generate both light skeleton and libbpf skeleton for these -LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c +LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c SKEL_BLACKLIST += $$(LSKELS) test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o @@ -531,14 +536,18 @@ $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \ $(OUTPUT)/perfbuf_bench.skel.h $(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h +$(OUTPUT)/bench_bpf_loop.o: $(OUTPUT)/bpf_loop_bench.skel.h $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm -$(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \ +$(OUTPUT)/bench: $(OUTPUT)/bench.o \ + $(TESTING_HELPERS) \ + $(TRACE_HELPERS) \ $(OUTPUT)/bench_count.o \ $(OUTPUT)/bench_rename.o \ $(OUTPUT)/bench_trigger.o \ $(OUTPUT)/bench_ringbufs.o \ - $(OUTPUT)/bench_bloom_filter_map.o + $(OUTPUT)/bench_bloom_filter_map.o \ + $(OUTPUT)/bench_bpf_loop.o $(call msg,BINARY,,$@) $(Q)$(CC) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index cc4722f693e9..3d6082b97a56 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -134,6 +134,39 @@ void hits_drops_report_final(struct bench_res res[], int res_cnt) total_ops_mean, total_ops_stddev); } +void ops_report_progress(int iter, struct bench_res *res, long delta_ns) +{ + double hits_per_sec, hits_per_prod; + + hits_per_sec = res->hits / 1000000.0 / (delta_ns / 1000000000.0); + hits_per_prod = hits_per_sec / env.producer_cnt; + + printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0); + + printf("hits %8.3lfM/s (%7.3lfM/prod)\n", hits_per_sec, hits_per_prod); +} + +void ops_report_final(struct bench_res res[], int res_cnt) +{ + double hits_mean = 0.0, hits_stddev = 0.0; + int i; + + for (i = 0; i < res_cnt; i++) + hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt); + + if (res_cnt > 1) { + for (i = 0; i < res_cnt; i++) + hits_stddev += (hits_mean - res[i].hits / 1000000.0) * + (hits_mean - res[i].hits / 1000000.0) / + (res_cnt - 1.0); + + hits_stddev = sqrt(hits_stddev); + } + printf("Summary: throughput %8.3lf \u00B1 %5.3lf M ops/s (%7.3lfM ops/prod), ", + hits_mean, hits_stddev, 
hits_mean / env.producer_cnt); + printf("latency %8.3lf ns/op\n", 1000.0 / hits_mean * env.producer_cnt); +} + const char *argp_program_version = "benchmark"; const char *argp_program_bug_address = "<bpf@vger.kernel.org>"; const char argp_program_doc[] = @@ -171,10 +204,12 @@ static const struct argp_option opts[] = { extern struct argp bench_ringbufs_argp; extern struct argp bench_bloom_map_argp; +extern struct argp bench_bpf_loop_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, { &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 }, + { &bench_bpf_loop_argp, 0, "bpf_loop helper benchmark", 0 }, {}, }; @@ -359,6 +394,11 @@ extern const struct bench bench_trig_kprobe; extern const struct bench bench_trig_fentry; extern const struct bench bench_trig_fentry_sleep; extern const struct bench bench_trig_fmodret; +extern const struct bench bench_trig_uprobe_base; +extern const struct bench bench_trig_uprobe_with_nop; +extern const struct bench bench_trig_uretprobe_with_nop; +extern const struct bench bench_trig_uprobe_without_nop; +extern const struct bench bench_trig_uretprobe_without_nop; extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; extern const struct bench bench_pb_libbpf; @@ -368,6 +408,7 @@ extern const struct bench bench_bloom_update; extern const struct bench bench_bloom_false_positive; extern const struct bench bench_hashmap_without_bloom; extern const struct bench bench_hashmap_with_bloom; +extern const struct bench bench_bpf_loop; static const struct bench *benchs[] = { &bench_count_global, @@ -385,6 +426,11 @@ static const struct bench *benchs[] = { &bench_trig_fentry, &bench_trig_fentry_sleep, &bench_trig_fmodret, + &bench_trig_uprobe_base, + &bench_trig_uprobe_with_nop, + &bench_trig_uretprobe_with_nop, + &bench_trig_uprobe_without_nop, + &bench_trig_uretprobe_without_nop, &bench_rb_libbpf, &bench_rb_custom, &bench_pb_libbpf, @@ -394,6 +440,7 @@ static const struct bench *benchs[] = { &bench_bloom_false_positive, &bench_hashmap_without_bloom, &bench_hashmap_with_bloom, + &bench_bpf_loop, }; static void setup_benchmark() diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 624c6b11501f..50785503756b 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -59,6 +59,8 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns); void hits_drops_report_final(struct bench_res res[], int res_cnt); void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns); void false_hits_report_final(struct bench_res res[], int res_cnt); +void ops_report_progress(int iter, struct bench_res *res, long delta_ns); +void ops_report_final(struct bench_res res[], int res_cnt); static inline __u64 get_time_ns() { struct timespec t; diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c new file mode 100644 index 000000000000..d0a6572bfab6 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <argp.h> +#include "bench.h" +#include "bpf_loop_bench.skel.h" + +/* bpf_loop helper benchmark */ +static struct ctx { + struct bpf_loop_bench *skel; +} ctx; + +static struct { + __u32 nr_loops; +} args = { + .nr_loops = 10, +}; + +enum { + ARG_NR_LOOPS = 4000, +}; + +static const struct argp_option opts[] = { + 
{ "nr_loops", ARG_NR_LOOPS, "nr_loops", 0, + "Set number of loops for the bpf_loop helper"}, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case ARG_NR_LOOPS: + args.nr_loops = strtol(arg, NULL, 10); + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +/* exported into benchmark runner */ +const struct argp bench_bpf_loop_argp = { + .options = opts, + .parser = parse_arg, +}; + +static void validate(void) +{ + if (env.consumer_cnt != 1) { + fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + exit(1); + } +} + +static void *producer(void *input) +{ + while (true) + /* trigger the bpf program */ + syscall(__NR_getpgid); + + return NULL; +} + +static void *consumer(void *input) +{ + return NULL; +} + +static void measure(struct bench_res *res) +{ + res->hits = atomic_swap(&ctx.skel->bss->hits, 0); +} + +static void setup(void) +{ + struct bpf_link *link; + + setup_libbpf(); + + ctx.skel = bpf_loop_bench__open_and_load(); + if (!ctx.skel) { + fprintf(stderr, "failed to open skeleton\n"); + exit(1); + } + + link = bpf_program__attach(ctx.skel->progs.benchmark); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } + + ctx.skel->bss->nr_loops = args.nr_loops; +} + +const struct bench bench_bpf_loop = { + .name = "bpf-loop", + .validate = validate, + .setup = setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = ops_report_progress, + .report_final = ops_report_final, +}; diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c index f41a491a8cc0..049a5ad56f65 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -2,6 +2,7 @@ /* Copyright (c) 2020 Facebook */ #include "bench.h" #include "trigger_bench.skel.h" +#include "trace_helpers.h" /* BPF triggering benchmarks */ static struct trigger_ctx { @@ -107,6 +108,101 @@ static void *trigger_consumer(void *input) return NULL; } +/* make sure call is not inlined and not avoided by compiler, so __weak and + * inline asm volatile in the body of the function + * + * There is a performance difference between uprobing at nop location vs other + * instructions. So use two different targets, one of which starts with nop + * and another doesn't. + * + * GCC doesn't generate stack setup preample for these functions due to them + * having no input arguments and doing nothing in the body. 
+ */ +__weak void uprobe_target_with_nop(void) +{ + asm volatile ("nop"); +} + +__weak void uprobe_target_without_nop(void) +{ + asm volatile (""); +} + +static void *uprobe_base_producer(void *input) +{ + while (true) { + uprobe_target_with_nop(); + atomic_inc(&base_hits.value); + } + return NULL; +} + +static void *uprobe_producer_with_nop(void *input) +{ + while (true) + uprobe_target_with_nop(); + return NULL; +} + +static void *uprobe_producer_without_nop(void *input) +{ + while (true) + uprobe_target_without_nop(); + return NULL; +} + +static void usetup(bool use_retprobe, bool use_nop) +{ + size_t uprobe_offset; + ssize_t base_addr; + struct bpf_link *link; + + setup_libbpf(); + + ctx.skel = trigger_bench__open_and_load(); + if (!ctx.skel) { + fprintf(stderr, "failed to open skeleton\n"); + exit(1); + } + + base_addr = get_base_addr(); + if (use_nop) + uprobe_offset = get_uprobe_offset(&uprobe_target_with_nop, base_addr); + else + uprobe_offset = get_uprobe_offset(&uprobe_target_without_nop, base_addr); + + link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe, + use_retprobe, + -1 /* all PIDs */, + "/proc/self/exe", + uprobe_offset); + if (!link) { + fprintf(stderr, "failed to attach uprobe!\n"); + exit(1); + } + ctx.skel->links.bench_trigger_uprobe = link; +} + +static void uprobe_setup_with_nop() +{ + usetup(false, true); +} + +static void uretprobe_setup_with_nop() +{ + usetup(true, true); +} + +static void uprobe_setup_without_nop() +{ + usetup(false, false); +} + +static void uretprobe_setup_without_nop() +{ + usetup(true, false); +} + const struct bench bench_trig_base = { .name = "trig-base", .validate = trigger_validate, @@ -182,3 +278,53 @@ const struct bench bench_trig_fmodret = { .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, }; + +const struct bench bench_trig_uprobe_base = { + .name = "trig-uprobe-base", + .setup = NULL, /* no uprobe/uretprobe is attached */ + .producer_thread = uprobe_base_producer, + .consumer_thread = trigger_consumer, + .measure = trigger_base_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uprobe_with_nop = { + .name = "trig-uprobe-with-nop", + .setup = uprobe_setup_with_nop, + .producer_thread = uprobe_producer_with_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uretprobe_with_nop = { + .name = "trig-uretprobe-with-nop", + .setup = uretprobe_setup_with_nop, + .producer_thread = uprobe_producer_with_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uprobe_without_nop = { + .name = "trig-uprobe-without-nop", + .setup = uprobe_setup_without_nop, + .producer_thread = uprobe_producer_without_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uretprobe_without_nop = { + .name = "trig-uretprobe-without-nop", + .setup = uretprobe_setup_without_nop, + .producer_thread = uprobe_producer_without_nop, + .consumer_thread = trigger_consumer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, 
+}; diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh b/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh new file mode 100755 index 000000000000..d4f5f73b356b --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source ./benchs/run_common.sh + +set -eufo pipefail + +for t in 1 4 8 12 16; do +for i in 10 100 500 1000 5000 10000 50000 100000 500000 1000000; do +subtitle "nr_loops: $i, nr_threads: $t" + summarize_ops "bpf_loop: " \ + "$($RUN_BENCH -p $t --nr_loops $i bpf-loop)" + printf "\n" +done +done diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh index 9a16be78b180..6c5e6023a69f 100644 --- a/tools/testing/selftests/bpf/benchs/run_common.sh +++ b/tools/testing/selftests/bpf/benchs/run_common.sh @@ -33,6 +33,14 @@ function percentage() echo "$*" | sed -E "s/.*Percentage\s=\s+([0-9]+\.[0-9]+).*/\1/" } +function ops() +{ + echo -n "throughput: " + echo -n "$*" | sed -E "s/.*throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/" + echo -n -e ", latency: " + echo "$*" | sed -E "s/.*latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/" +} + function total() { echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/" @@ -52,6 +60,13 @@ function summarize_percentage() printf "%-20s %s%%\n" "$bench" "$(percentage $summary)" } +function summarize_ops() +{ + bench="$1" + summary=$(echo $2 | tail -n1) + printf "%-20s %s\n" "$bench" "$(ops $summary)" +} + function summarize_total() { bench="$1" diff --git a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c index f4d870da7684..78c76496b14a 100644 --- a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c @@ -68,13 +68,6 @@ static void map_batch_verify(int *visited, __u32 max_entries, int *keys, static void __test_map_lookup_and_update_batch(bool is_pcpu) { - struct bpf_create_map_attr xattr = { - .name = "array_map", - .map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : - BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(__s64), - }; int map_fd, *keys, *visited; __u32 count, total, total_success; const __u32 max_entries = 10; @@ -86,10 +79,10 @@ static void __test_map_lookup_and_update_batch(bool is_pcpu) .flags = 0, ); - xattr.max_entries = max_entries; - map_fd = bpf_create_map_xattr(&xattr); + map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : BPF_MAP_TYPE_ARRAY, + "array_map", sizeof(int), sizeof(__s64), max_entries, NULL); CHECK(map_fd == -1, - "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); + "bpf_map_create()", "error:%s\n", strerror(errno)); value_size = sizeof(__s64); if (is_pcpu) diff --git a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c index 976bf415fbdd..f807d53fd8dd 100644 --- a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c @@ -83,22 +83,15 @@ void __test_map_lookup_and_delete_batch(bool is_pcpu) int err, step, value_size; bool nospace_err; void *values; - struct bpf_create_map_attr xattr = { - .name = "hash_map", - .map_type = is_pcpu ? 
BPF_MAP_TYPE_PERCPU_HASH : - BPF_MAP_TYPE_HASH, - .key_size = sizeof(int), - .value_size = sizeof(int), - }; DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts, .elem_flags = 0, .flags = 0, ); - xattr.max_entries = max_entries; - map_fd = bpf_create_map_xattr(&xattr); + map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH, + "hash_map", sizeof(int), sizeof(int), max_entries, NULL); CHECK(map_fd == -1, - "bpf_create_map_xattr()", "error:%s\n", strerror(errno)); + "bpf_map_create()", "error:%s\n", strerror(errno)); value_size = is_pcpu ? sizeof(value) : sizeof(int); keys = malloc(max_entries * sizeof(int)); diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c index 2e986e5e4cac..87d07b596e17 100644 --- a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c @@ -64,13 +64,7 @@ static void map_batch_verify(int *visited, __u32 max_entries, void test_lpm_trie_map_batch_ops(void) { - struct bpf_create_map_attr xattr = { - .name = "lpm_trie_map", - .map_type = BPF_MAP_TYPE_LPM_TRIE, - .key_size = sizeof(struct test_lpm_key), - .value_size = sizeof(int), - .map_flags = BPF_F_NO_PREALLOC, - }; + LIBBPF_OPTS(bpf_map_create_opts, create_opts, .map_flags = BPF_F_NO_PREALLOC); struct test_lpm_key *keys, key; int map_fd, *values, *visited; __u32 step, count, total, total_success; @@ -82,9 +76,10 @@ void test_lpm_trie_map_batch_ops(void) .flags = 0, ); - xattr.max_entries = max_entries; - map_fd = bpf_create_map_xattr(&xattr); - CHECK(map_fd == -1, "bpf_create_map_xattr()", "error:%s\n", + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map", + sizeof(struct test_lpm_key), sizeof(int), + max_entries, &create_opts); + CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n", strerror(errno)); keys = malloc(max_entries * sizeof(struct test_lpm_key)); diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c index e569edc679d8..099eb4dfd4f7 100644 --- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c @@ -19,16 +19,12 @@ #include <test_btf.h> #include <test_maps.h> -static struct bpf_create_map_attr xattr = { - .name = "sk_storage_map", - .map_type = BPF_MAP_TYPE_SK_STORAGE, - .map_flags = BPF_F_NO_PREALLOC, - .max_entries = 0, - .key_size = 4, - .value_size = 8, +static struct bpf_map_create_opts map_opts = { + .sz = sizeof(map_opts), .btf_key_type_id = 1, .btf_value_type_id = 3, .btf_fd = -1, + .map_flags = BPF_F_NO_PREALLOC, }; static unsigned int nr_sk_threads_done; @@ -140,7 +136,7 @@ static int load_btf(void) memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types), btf_str_sec, sizeof(btf_str_sec)); - return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0); + return bpf_btf_load(raw_btf, sizeof(raw_btf), NULL); } static int create_sk_storage_map(void) @@ -150,13 +146,13 @@ static int create_sk_storage_map(void) btf_fd = load_btf(); CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n", btf_fd, errno); - xattr.btf_fd = btf_fd; + map_opts.btf_fd = btf_fd; - map_fd = bpf_create_map_xattr(&xattr); - xattr.btf_fd = -1; + map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts); + map_opts.btf_fd = -1; close(btf_fd); CHECK(map_fd == -1, - "bpf_create_map_xattr()", "errno:%d\n", errno); + "bpf_map_create()", "errno:%d\n", errno); return map_fd; } @@ 
-463,20 +459,20 @@ static void test_sk_storage_map_basic(void) int cnt; int lock; } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value; - struct bpf_create_map_attr bad_xattr; + struct bpf_map_create_opts bad_xattr; int btf_fd, map_fd, sk_fd, err; btf_fd = load_btf(); CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n", btf_fd, errno); - xattr.btf_fd = btf_fd; + map_opts.btf_fd = btf_fd; sk_fd = socket(AF_INET6, SOCK_STREAM, 0); CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n", sk_fd, errno); - map_fd = bpf_create_map_xattr(&xattr); - CHECK(map_fd == -1, "bpf_create_map_xattr(good_xattr)", + map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts); + CHECK(map_fd == -1, "bpf_map_create(good_xattr)", "map_fd:%d errno:%d\n", map_fd, errno); /* Add new elem */ @@ -560,31 +556,29 @@ static void test_sk_storage_map_basic(void) CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); + memcpy(&bad_xattr, &map_opts, sizeof(map_opts)); bad_xattr.btf_key_type_id = 0; - err = bpf_create_map_xattr(&bad_xattr); - CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr); + CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); + memcpy(&bad_xattr, &map_opts, sizeof(map_opts)); bad_xattr.btf_key_type_id = 3; - err = bpf_create_map_xattr(&bad_xattr); - CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr); + CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); - bad_xattr.max_entries = 1; - err = bpf_create_map_xattr(&bad_xattr); - CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 1, &map_opts); + CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)", "err:%d errno:%d\n", err, errno); - memcpy(&bad_xattr, &xattr, sizeof(xattr)); + memcpy(&bad_xattr, &map_opts, sizeof(map_opts)); bad_xattr.map_flags = 0; - err = bpf_create_map_xattr(&bad_xattr); + err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr); CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)", "err:%d errno:%d\n", err, errno); - xattr.btf_fd = -1; + map_opts.btf_fd = -1; close(btf_fd); close(map_fd); close(sk_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index 0f9525293881..86b7d5d84eec 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -167,7 +167,7 @@ static void test_cmpxchg(struct atomics_lskel *skel) prog_fd = skel->progs.cmpxchg.prog_fd; err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run add", + if (CHECK(err || retval, "test_run cmpxchg", "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) goto cleanup; @@ -196,7 +196,7 @@ static void test_xchg(struct atomics_lskel *skel) prog_fd = skel->progs.xchg.prog_fd; err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run add", + if (CHECK(err || retval, "test_run xchg", "err %d errno %d retval %d duration 
%d\n", err, errno, retval, duration)) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index be73e3de6668..d2d9e965eba5 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -7,32 +7,33 @@ static void test_fail_cases(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts); __u32 value; int fd, err; /* Invalid key size */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 4, sizeof(value), 100, 0); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid key size")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 4, sizeof(value), 100, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid key size")) close(fd); /* Invalid value size */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, 0, 100, 0); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid value size 0")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, 0, 100, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0")) close(fd); /* Invalid max entries size */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 0, 0); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid max entries size")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size")) close(fd); /* Bloom filter maps do not support BPF_F_NO_PREALLOC */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, - BPF_F_NO_PREALLOC); - if (!ASSERT_LT(fd, 0, "bpf_create_map bloom filter invalid flags")) + opts.map_flags = BPF_F_NO_PREALLOC; + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid flags")) close(fd); - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, 0); - if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter")) + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, NULL); + if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter")) return; /* Test invalid flags */ @@ -56,13 +57,14 @@ static void test_fail_cases(void) static void test_success_cases(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts); char value[11]; int fd, err; /* Create a map */ - fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(value), 100, - BPF_F_ZERO_SEED | BPF_F_NUMA_NODE); - if (!ASSERT_GE(fd, 0, "bpf_create_map bloom filter success case")) + opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); + if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) return; /* Add a value to the bloom filter */ @@ -100,9 +102,9 @@ static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals struct bpf_link *link; /* Create a bloom filter map that will be used as the inner map */ - inner_map_fd = bpf_create_map(BPF_MAP_TYPE_BLOOM_FILTER, 0, sizeof(*rand_vals), - nr_rand_vals, 0); - if (!ASSERT_GE(inner_map_fd, 0, "bpf_create_map bloom filter inner map")) + inner_map_fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(*rand_vals), + nr_rand_vals, NULL); + if (!ASSERT_GE(inner_map_fd, 0, "bpf_map_create bloom filter inner map")) return; for (i = 0; i < nr_rand_vals; i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c 
b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 3e10abce3e5a..b84f859b1267 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -469,12 +469,12 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) * fills seq_file buffer and then the other will trigger * overflow and needs restart. */ - map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); - if (CHECK(map1_fd < 0, "bpf_create_map", + map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); + if (CHECK(map1_fd < 0, "bpf_map_create", "map_creation failed: %s\n", strerror(errno))) goto out; - map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); - if (CHECK(map2_fd < 0, "bpf_create_map", + map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); + if (CHECK(map2_fd < 0, "bpf_map_create", "map_creation failed: %s\n", strerror(errno))) goto free_map1; @@ -1206,13 +1206,14 @@ static void test_task_vma(void) goto out; /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks - * to trigger seq_file corner cases. The expected output is much - * longer than 1kB, so the while loop will terminate. + * to trigger seq_file corner cases. */ len = 0; while (len < CMP_BUFFER_SIZE) { err = read_fd_into_buffer(iter_fd, task_vma_output + len, min(read_size, CMP_BUFFER_SIZE - len)); + if (!err) + break; if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n")) goto out; len += err; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_loop.c b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c new file mode 100644 index 000000000000..380d7a2072e3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <network_helpers.h> +#include "bpf_loop.skel.h" + +static void check_nr_loops(struct bpf_loop *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.test_prog); + if (!ASSERT_OK_PTR(link, "link")) + return; + + /* test 0 loops */ + skel->bss->nr_loops = 0; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "0 loops"); + + /* test 500 loops */ + skel->bss->nr_loops = 500; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "500 loops"); + ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output"); + + /* test exceeding the max limit */ + skel->bss->nr_loops = -1; + + usleep(1); + + ASSERT_EQ(skel->bss->err, -E2BIG, "over max limit"); + + bpf_link__destroy(link); +} + +static void check_callback_fn_stop(struct bpf_loop *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.test_prog); + if (!ASSERT_OK_PTR(link, "link")) + return; + + /* testing that loop is stopped when callback_fn returns 1 */ + skel->bss->nr_loops = 400; + skel->data->stop_index = 50; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->data->stop_index + 1, + "nr_loops_returned"); + ASSERT_EQ(skel->bss->g_output, (50 * 49) / 2, + "g_output"); + + bpf_link__destroy(link); +} + +static void check_null_callback_ctx(struct bpf_loop *skel) +{ + struct bpf_link *link; + + /* check that user is able to pass in a null callback_ctx */ + link = bpf_program__attach(skel->progs.prog_null_ctx); + if (!ASSERT_OK_PTR(link, "link")) + return; + + skel->bss->nr_loops = 10; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "nr_loops_returned"); + + bpf_link__destroy(link); +} + +static void 
check_invalid_flags(struct bpf_loop *skel) +{ + struct bpf_link *link; + + /* check that passing in non-zero flags returns -EINVAL */ + link = bpf_program__attach(skel->progs.prog_invalid_flags); + if (!ASSERT_OK_PTR(link, "link")) + return; + + usleep(1); + + ASSERT_EQ(skel->bss->err, -EINVAL, "err"); + + bpf_link__destroy(link); +} + +static void check_nested_calls(struct bpf_loop *skel) +{ + __u32 nr_loops = 100, nested_callback_nr_loops = 4; + struct bpf_link *link; + + /* check that nested calls are supported */ + link = bpf_program__attach(skel->progs.prog_nested_calls); + if (!ASSERT_OK_PTR(link, "link")) + return; + + skel->bss->nr_loops = nr_loops; + skel->bss->nested_callback_nr_loops = nested_callback_nr_loops; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, nr_loops * nested_callback_nr_loops + * nested_callback_nr_loops, "nr_loops_returned"); + ASSERT_EQ(skel->bss->g_output, (4 * 3) / 2 * nested_callback_nr_loops + * nr_loops, "g_output"); + + bpf_link__destroy(link); +} + +void test_bpf_loop(void) +{ + struct bpf_loop *skel; + + skel = bpf_loop__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_loop__open_and_load")) + return; + + skel->bss->pid = getpid(); + + if (test__start_subtest("check_nr_loops")) + check_nr_loops(skel); + if (test__start_subtest("check_callback_fn_stop")) + check_callback_fn_stop(skel); + if (test__start_subtest("check_null_callback_ctx")) + check_null_callback_ctx(skel); + if (test__start_subtest("check_invalid_flags")) + check_invalid_flags(skel); + if (test__start_subtest("check_nested_calls")) + check_nested_calls(skel); + + bpf_loop__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 94e03df69d71..8daca0ac909f 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -217,14 +217,16 @@ static bool found; static int libbpf_debug_print(enum libbpf_print_level level, const char *format, va_list args) { - char *log_buf; + const char *log_buf; if (level != LIBBPF_WARN || - strcmp(format, "libbpf: \n%s\n")) { + !strstr(format, "-- BEGIN PROG LOAD LOG --")) { vprintf(format, args); return 0; } + /* skip prog_name */ + va_arg(args, char *); log_buf = va_arg(args, char *); if (!log_buf) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 27f5d8ea7964..ff6cce9fef06 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -19,16 +19,28 @@ extern int extra_prog_load_log_flags; static int check_load(const char *file, enum bpf_prog_type type) { - struct bpf_prog_load_attr attr; struct bpf_object *obj = NULL; - int err, prog_fd; - - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = file; - attr.prog_type = type; - attr.log_level = 4 | extra_prog_load_log_flags; - attr.prog_flags = BPF_F_TEST_RND_HI32; - err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); + struct bpf_program *prog; + int err; + + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (err) + return err; + + prog = bpf_object__next_program(obj, NULL); + if (!prog) { + err = -ENOENT; + goto err_out; + } + + bpf_program__set_type(prog, type); + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags); + + err = bpf_object__load(obj); + +err_out: 
bpf_object__close(obj); return err; } @@ -115,6 +127,12 @@ void test_verif_scale_pyperf600() scale_test("pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } +void test_verif_scale_pyperf600_bpf_loop(void) +{ + /* use the bpf_loop helper */ + scale_test("pyperf600_bpf_loop.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + void test_verif_scale_pyperf600_nounroll() { /* no unroll at all. @@ -165,6 +183,12 @@ void test_verif_scale_strobemeta() scale_test("strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } +void test_verif_scale_strobemeta_bpf_loop(void) +{ + /* use the bpf_loop helper */ + scale_test("strobemeta_bpf_loop.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + void test_verif_scale_strobemeta_nounroll1() { /* no unroll, tiny loops */ diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 4aa6343dc4c8..01b776a7beeb 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4071,10 +4071,32 @@ done: return raw_btf; } +static int load_raw_btf(const void *raw_data, size_t raw_size) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + int btf_fd; + + if (always_log) { + opts.log_buf = btf_log_buf, + opts.log_size = BTF_LOG_BUF_SIZE, + opts.log_level = 1; + } + + btf_fd = bpf_btf_load(raw_data, raw_size, &opts); + if (btf_fd < 0 && !always_log) { + opts.log_buf = btf_log_buf, + opts.log_size = BTF_LOG_BUF_SIZE, + opts.log_level = 1; + btf_fd = bpf_btf_load(raw_data, raw_size, &opts); + } + + return btf_fd; +} + static void do_test_raw(unsigned int test_num) { struct btf_raw_test *test = &raw_tests[test_num - 1]; - struct bpf_create_map_attr create_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); int map_fd = -1, btf_fd = -1; unsigned int raw_btf_size; struct btf_header *hdr; @@ -4100,16 +4122,14 @@ static void do_test_raw(unsigned int test_num) hdr->str_len = (int)hdr->str_len + test->str_len_delta; *btf_log_buf = '\0'; - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); free(raw_btf); err = ((btf_fd < 0) != test->btf_load_err); if (CHECK(err, "btf_fd:%d test->btf_load_err:%u", btf_fd, test->btf_load_err) || CHECK(test->err_str && !strstr(btf_log_buf, test->err_str), - "expected err_str:%s", test->err_str)) { + "expected err_str:%s\n", test->err_str)) { err = -1; goto done; } @@ -4117,16 +4137,11 @@ static void do_test_raw(unsigned int test_num) if (err || btf_fd < 0) goto done; - create_attr.name = test->map_name; - create_attr.map_type = test->map_type; - create_attr.key_size = test->key_size; - create_attr.value_size = test->value_size; - create_attr.max_entries = test->max_entries; - create_attr.btf_fd = btf_fd; - create_attr.btf_key_type_id = test->key_type_id; - create_attr.btf_value_type_id = test->value_type_id; - - map_fd = bpf_create_map_xattr(&create_attr); + opts.btf_fd = btf_fd; + opts.btf_key_type_id = test->key_type_id; + opts.btf_value_type_id = test->value_type_id; + map_fd = bpf_map_create(test->map_type, test->map_name, + test->key_size, test->value_size, test->max_entries, &opts); err = ((map_fd < 0) != test->map_create_err); CHECK(err, "map_fd:%d test->map_create_err:%u", @@ -4232,9 +4247,7 @@ static int test_big_btf_info(unsigned int test_num) goto done; } - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); if (CHECK(btf_fd < 0, "errno:%d", errno)) { err = -1; goto done; @@ -4290,7
+4303,7 @@ done: static int test_btf_id(unsigned int test_num) { const struct btf_get_info_test *test = &get_info_tests[test_num - 1]; - struct bpf_create_map_attr create_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); uint8_t *raw_btf = NULL, *user_btf[2] = {}; int btf_fd[2] = {-1, -1}, map_fd = -1; struct bpf_map_info map_info = {}; @@ -4320,9 +4333,7 @@ static int test_btf_id(unsigned int test_num) info[i].btf_size = raw_btf_size; } - btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd[0] = load_raw_btf(raw_btf, raw_btf_size); if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) { err = -1; goto done; @@ -4355,16 +4366,11 @@ static int test_btf_id(unsigned int test_num) } /* Test btf members in struct bpf_map_info */ - create_attr.name = "test_btf_id"; - create_attr.map_type = BPF_MAP_TYPE_ARRAY; - create_attr.key_size = sizeof(int); - create_attr.value_size = sizeof(unsigned int); - create_attr.max_entries = 4; - create_attr.btf_fd = btf_fd[0]; - create_attr.btf_key_type_id = 1; - create_attr.btf_value_type_id = 2; - - map_fd = bpf_create_map_xattr(&create_attr); + opts.btf_fd = btf_fd[0]; + opts.btf_key_type_id = 1; + opts.btf_value_type_id = 2; + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_btf_id", + sizeof(int), sizeof(int), 4, &opts); if (CHECK(map_fd < 0, "errno:%d", errno)) { err = -1; goto done; @@ -4457,9 +4463,7 @@ static void do_test_get_info(unsigned int test_num) goto done; } - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); if (CHECK(btf_fd <= 0, "errno:%d", errno)) { err = -1; goto done; @@ -5153,7 +5157,7 @@ static void do_test_pprint(int test_num) { const struct btf_raw_test *test = &pprint_test_template[test_num]; enum pprint_mapv_kind_t mapv_kind = test->mapv_kind; - struct bpf_create_map_attr create_attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); bool ordered_map, lossless_map, percpu_map; int err, ret, num_cpus, rounded_value_size; unsigned int key, nr_read_elems; @@ -5179,26 +5183,19 @@ static void do_test_pprint(int test_num) return; *btf_log_buf = '\0'; - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); free(raw_btf); - if (CHECK(btf_fd < 0, "errno:%d", errno)) { + if (CHECK(btf_fd < 0, "errno:%d\n", errno)) { err = -1; goto done; } - create_attr.name = test->map_name; - create_attr.map_type = test->map_type; - create_attr.key_size = test->key_size; - create_attr.value_size = test->value_size; - create_attr.max_entries = test->max_entries; - create_attr.btf_fd = btf_fd; - create_attr.btf_key_type_id = test->key_type_id; - create_attr.btf_value_type_id = test->value_type_id; - - map_fd = bpf_create_map_xattr(&create_attr); + opts.btf_fd = btf_fd; + opts.btf_key_type_id = test->key_type_id; + opts.btf_value_type_id = test->value_type_id; + map_fd = bpf_map_create(test->map_type, test->map_name, + test->key_size, test->value_size, test->max_entries, &opts); if (CHECK(map_fd < 0, "errno:%d", errno)) { err = -1; goto done; @@ -6553,9 +6550,7 @@ static void do_test_info_raw(unsigned int test_num) return; *btf_log_buf = '\0'; - btf_fd = bpf_load_btf(raw_btf, raw_btf_size, - btf_log_buf, BTF_LOG_BUF_SIZE, - always_log); + btf_fd = load_raw_btf(raw_btf, raw_btf_size); free(raw_btf); if (CHECK(btf_fd < 0, "invalid btf_fd errno:%d", errno)) { @@ -7352,6 +7347,32 @@ static struct btf_dedup_test dedup_tests[] = { 
BTF_STR_SEC("\0tag1"), }, }, +{ + .descr = "dedup: btf_type_tag #5, struct", + .input = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [4] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [5] */ + BTF_MEMBER_ENC(NAME_NTH(3), 4, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0t\0m"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0t\0m"), + }, +}, }; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c index 9d3b8d7a1537..90aac437576d 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c @@ -314,6 +314,117 @@ cleanup: btf__free(btf1); } +static void btf_add_dup_struct_in_cu(struct btf *btf, int start_id) +{ +#define ID(n) (start_id + n) + btf__set_pointer_size(btf, 8); /* enforce 64-bit arch */ + + btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + + btf__add_struct(btf, "s", 8); /* [2] struct s { */ + btf__add_field(btf, "a", ID(3), 0, 0); /* struct anon a; */ + btf__add_field(btf, "b", ID(4), 0, 0); /* struct anon b; */ + /* } */ + + btf__add_struct(btf, "(anon)", 8); /* [3] struct anon { */ + btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */ + btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */ + /* } */ + + btf__add_struct(btf, "(anon)", 8); /* [4] struct anon { */ + btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */ + btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */ + /* } */ +#undef ID +} + +static void test_split_dup_struct_in_cu() +{ + struct btf *btf1, *btf2 = NULL; + int err; + + /* generate the base data.. */ + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf_add_dup_struct_in_cu(btf1, 0); + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=4 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32", + "[4] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + + /* ..dedup them... 
*/ + err = btf__dedup(btf1, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + + /* and add the same data on top of it */ + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf_add_dup_struct_in_cu(btf2, 3); + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32", + "[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[5] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=6 bits_offset=0\n" + "\t'b' type_id=7 bits_offset=0", + "[6] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=4 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=32", + "[7] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=4 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=32"); + + err = btf__dedup(btf2, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + /* after dedup it should match the original data */ + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + +cleanup: + btf__free(btf2); + btf__free(btf1); +} + void test_btf_dedup_split() { if (test__start_subtest("split_simple")) @@ -322,4 +433,6 @@ void test_btf_dedup_split() test_split_struct_duped(); if (test__start_subtest("split_fwd_resolve")) test_split_fwd_resolve(); + if (test__start_subtest("split_dup_struct_in_cu")) + test_split_dup_struct_in_cu(); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index d6272013a5a3..9e26903f9170 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -323,7 +323,7 @@ static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d, char *str) { #ifdef __SIZEOF_INT128__ - __int128 i = 0xffffffffffffffff; + unsigned __int128 i = 0xffffffffffffffff; /* this dance is required because we cannot directly initialize * a 128-bit value to anything larger than a 64-bit value. @@ -756,7 +756,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, /* overflow bpf_sock_ops struct with final element nonzero/zero. * Regardless of the value of the final field, we don't have all the * data we need to display it, so we should trigger an overflow. - * In other words oveflow checking should trump "is field zero?" + * In other words overflow checking should trump "is field zero?" * checks because if we've overflowed, it shouldn't matter what the * field is - we can't trust its value so shouldn't display it. 
*/ diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index de9c3e12b0ea..d3e8f729c623 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -15,22 +15,22 @@ static int prog_load_cnt(int verdict, int val) int cgroup_storage_fd, percpu_cgroup_storage_fd; if (map_fd < 0) - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); if (map_fd < 0) { printf("failed to create map '%s'\n", strerror(errno)); return -1; } - cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + cgroup_storage_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, + sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL); if (cgroup_storage_fd < 0) { printf("failed to create map '%s'\n", strerror(errno)); return -1; } - percpu_cgroup_storage_fd = bpf_create_map( - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, - sizeof(struct bpf_cgroup_storage_key), 8, 0, 0); + percpu_cgroup_storage_fd = bpf_map_create( + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL, + sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL); if (percpu_cgroup_storage_fd < 0) { printf("failed to create map '%s'\n", strerror(errno)); return -1; diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c index 9229db2f5ca5..ca574e1e30e6 100644 --- a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c +++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c @@ -51,19 +51,20 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type) bool v4 = family == AF_INET; __u16 expected_local_port = v4 ? 22222 : 22223; __u16 expected_peer_port = 60000; - struct bpf_prog_load_attr attr = { - .file = v4 ? "./connect_force_port4.o" : - "./connect_force_port6.o", - }; struct bpf_program *prog; struct bpf_object *obj; - int xlate_fd, fd, err; + const char *obj_file = v4 ? "connect_force_port4.o" : "connect_force_port6.o"; + int fd, err; __u32 duration = 0; - err = bpf_prog_load_xattr(&attr, &obj, &xlate_fd); - if (err) { - log_err("Failed to load BPF object"); + obj = bpf_object__open_file(obj_file, NULL); + if (!ASSERT_OK_PTR(obj, "bpf_obj_open")) return -1; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "bpf_obj_load")) { + err = -EIO; + goto close_bpf_object; } prog = bpf_object__find_program_by_title(obj, v4 ? 
diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern.c b/tools/testing/selftests/bpf/prog_tests/core_kern.c new file mode 100644 index 000000000000..561c5185d886 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_kern.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "test_progs.h" +#include "core_kern.lskel.h" + +void test_core_kern_lskel(void) +{ + struct core_kern_lskel *skel; + + skel = core_kern_lskel__open_and_load(); + ASSERT_OK_PTR(skel, "open_and_load"); + core_kern_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 1041d0c593f6..44a9868c70ea 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -881,7 +881,8 @@ void test_core_reloc(void) data = mmap_data; memset(mmap_data, 0, sizeof(*data)); - memcpy(data->in, test_case->input, test_case->input_len); + if (test_case->input_len) + memcpy(data->in, test_case->input, test_case->input_len); data->my_pid_tgid = my_pid_tgid; link = bpf_program__attach_raw_tracepoint(prog, tp_name); diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index 4184c399d4c6..977ab433a946 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -24,13 +24,19 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) { bool good_kern_stack = false, good_user_stack = false; const char *nonjit_func = "___bpf_prog_run"; - struct get_stack_trace_t *e = data; + /* perfbuf-submitted data is 4-byte aligned, but we need 8-byte + * alignment, so copy data into a local variable, for simplicity + */ + struct get_stack_trace_t e; int i, num_stack; static __u64 cnt; struct ksym *ks; cnt++; + memset(&e, 0, sizeof(e)); + memcpy(&e, data, size <= sizeof(e) ? 
size : sizeof(e)); + if (size < sizeof(struct get_stack_trace_t)) { __u64 *raw_data = data; bool found = false; @@ -57,19 +63,19 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) good_user_stack = true; } } else { - num_stack = e->kern_stack_size / sizeof(__u64); + num_stack = e.kern_stack_size / sizeof(__u64); if (env.jit_enabled) { good_kern_stack = num_stack > 0; } else { for (i = 0; i < num_stack; i++) { - ks = ksym_search(e->kern_stack[i]); + ks = ksym_search(e.kern_stack[i]); if (ks && (strcmp(ks->name, nonjit_func) == 0)) { good_kern_stack = true; break; } } } - if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0) + if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0) good_user_stack = true; } diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 2a49f8fcde06..ce10d2fc3a6c 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> #include <network_helpers.h> +#include "kfree_skb.skel.h" struct meta { int ifindex; @@ -58,16 +59,11 @@ void serial_test_kfree_skb(void) .ctx_in = &skb, .ctx_size_in = sizeof(skb), }; - struct bpf_prog_load_attr attr = { - .file = "./kfree_skb.o", - }; - - struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL; - struct bpf_map *perf_buf_map, *global_data; - struct bpf_program *prog, *fentry, *fexit; - struct bpf_object *obj, *obj2 = NULL; + struct kfree_skb *skel = NULL; + struct bpf_link *link; + struct bpf_object *obj; struct perf_buffer *pb = NULL; - int err, kfree_skb_fd; + int err; bool passed = false; __u32 duration = 0; const int zero = 0; @@ -78,40 +74,27 @@ void serial_test_kfree_skb(void) if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) return; - err = bpf_prog_load_xattr(&attr, &obj2, &kfree_skb_fd); - if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) - goto close_prog; - - prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb"); - if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n")) - goto close_prog; - fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans"); - if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n")) - goto close_prog; - fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans"); - if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n")) - goto close_prog; - - global_data = bpf_object__find_map_by_name(obj2, ".bss"); - if (CHECK(!global_data, "find global data", "not found\n")) + skel = kfree_skb__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kfree_skb_skel")) goto close_prog; - link = bpf_program__attach_raw_tracepoint(prog, NULL); + link = bpf_program__attach_raw_tracepoint(skel->progs.trace_kfree_skb, NULL); if (!ASSERT_OK_PTR(link, "attach_raw_tp")) goto close_prog; - link_fentry = bpf_program__attach_trace(fentry); - if (!ASSERT_OK_PTR(link_fentry, "attach fentry")) - goto close_prog; - link_fexit = bpf_program__attach_trace(fexit); - if (!ASSERT_OK_PTR(link_fexit, "attach fexit")) + skel->links.trace_kfree_skb = link; + + link = bpf_program__attach_trace(skel->progs.fentry_eth_type_trans); + if (!ASSERT_OK_PTR(link, "attach fentry")) goto close_prog; + skel->links.fentry_eth_type_trans = link; - perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map"); - if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) + link = 
bpf_program__attach_trace(skel->progs.fexit_eth_type_trans); + if (!ASSERT_OK_PTR(link, "attach fexit")) goto close_prog; + skel->links.fexit_eth_type_trans = link; /* set up perf buffer */ - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, on_sample, NULL, &passed, NULL); if (!ASSERT_OK_PTR(pb, "perf_buf__new")) goto close_prog; @@ -133,7 +116,7 @@ void serial_test_kfree_skb(void) */ ASSERT_TRUE(passed, "passed"); - err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok); + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.bss), &zero, test_ok); if (CHECK(err, "get_result", "failed to get output data: %d\n", err)) goto close_prog; @@ -141,9 +124,6 @@ void serial_test_kfree_skb(void) CHECK_FAIL(!test_ok[0] || !test_ok[1]); close_prog: perf_buffer__free(pb); - bpf_link__destroy(link); - bpf_link__destroy(link_fentry); - bpf_link__destroy(link_fexit); bpf_object__close(obj); - bpf_object__close(obj2); + kfree_skb__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 5c9c0176991b..7d7445ccc141 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -4,6 +4,7 @@ #include <network_helpers.h> #include "kfunc_call_test.lskel.h" #include "kfunc_call_test_subprog.skel.h" +#include "kfunc_call_test_subprog.lskel.h" static void test_main(void) { @@ -49,6 +50,26 @@ static void test_subprog(void) kfunc_call_test_subprog__destroy(skel); } +static void test_subprog_lskel(void) +{ + struct kfunc_call_test_subprog_lskel *skel; + int prog_fd, retval, err; + + skel = kfunc_call_test_subprog_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + + prog_fd = skel->progs.kfunc_call_test1.prog_fd; + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, (__u32 *)&retval, NULL); + ASSERT_OK(err, "bpf_prog_test_run(test1)"); + ASSERT_EQ(retval, 10, "test1-retval"); + ASSERT_NEQ(skel->data->active_res, -1, "active_res"); + ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); + + kfunc_call_test_subprog_lskel__destroy(skel); +} + void test_kfunc_call(void) { if (test__start_subtest("main")) @@ -56,4 +77,7 @@ void test_kfunc_call(void) if (test__start_subtest("subprog")) test_subprog(); + + if (test__start_subtest("subprog_lskel")) + test_subprog_lskel(); } diff --git a/tools/testing/selftests/bpf/prog_tests/legacy_printk.c b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c new file mode 100644 index 000000000000..ec6e45f2a644 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "test_legacy_printk.skel.h" + +static int execute_one_variant(bool legacy) +{ + struct test_legacy_printk *skel; + int err, zero = 0, my_pid = getpid(), res, map_fd; + + skel = test_legacy_printk__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return -errno; + + bpf_program__set_autoload(skel->progs.handle_legacy, legacy); + bpf_program__set_autoload(skel->progs.handle_modern, !legacy); + + err = test_legacy_printk__load(skel); + /* no ASSERT_OK, we expect one of two variants can fail here */ + if (err) + goto err_out; + + if (legacy) { + map_fd = bpf_map__fd(skel->maps.my_pid_map); + err = bpf_map_update_elem(map_fd, &zero, &my_pid, BPF_ANY); + if (!ASSERT_OK(err, "my_pid_map_update")) + goto err_out; + 
err = bpf_map_lookup_elem(map_fd, &zero, &res); + } else { + skel->bss->my_pid_var = my_pid; + } + + err = test_legacy_printk__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto err_out; + + usleep(1); /* trigger */ + + if (legacy) { + map_fd = bpf_map__fd(skel->maps.res_map); + err = bpf_map_lookup_elem(map_fd, &zero, &res); + if (!ASSERT_OK(err, "res_map_lookup")) + goto err_out; + } else { + res = skel->bss->res_var; + } + + if (!ASSERT_GT(res, 0, "res")) { + err = -EINVAL; + goto err_out; + } + +err_out: + test_legacy_printk__destroy(skel); + return err; +} + +void test_legacy_printk(void) +{ + /* legacy variant should work everywhere */ + ASSERT_OK(execute_one_variant(true /* legacy */), "legacy_case"); + + /* execute modern variant, can fail the load on old kernels */ + execute_one_variant(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c new file mode 100644 index 000000000000..e469b023962b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_log_buf.skel.h" + +static size_t libbpf_log_pos; +static char libbpf_log_buf[1024 * 1024]; +static bool libbpf_log_error; + +static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args) +{ + int emitted_cnt; + size_t left_cnt; + + left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos; + emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args); + + if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) { + libbpf_log_error = true; + return 0; + } + + libbpf_log_pos += emitted_cnt; + return 0; +} + +static void obj_load_log_buf(void) +{ + libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb); + LIBBPF_OPTS(bpf_object_open_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + struct test_log_buf* skel; + char *obj_log_buf, *good_log_buf, *bad_log_buf; + int err; + + obj_log_buf = malloc(3 * log_buf_sz); + if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf")) + return; + + good_log_buf = obj_log_buf + log_buf_sz; + bad_log_buf = obj_log_buf + 2 * log_buf_sz; + obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0'; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 4; /* for BTF this will turn into 1 */ + + /* In the first round every prog has its own log_buf, so libbpf logs + * don't have program failure logs + */ + skel = test_log_buf__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + /* set very verbose level for good_prog so we always get detailed logs */ + bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz); + bpf_program__set_log_level(skel->progs.good_prog, 2); + + bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz); + /* log_level 0 with custom log_buf means that verbose logs are not + * requested if program load is successful, but libbpf should retry + * with log_level 1 on error and put program's verbose load log into + * custom log_buf + */ + bpf_program__set_log_level(skel->progs.bad_prog, 0); + + err = test_log_buf__load(skel); + if (!ASSERT_ERR(err, "unexpected_load_success")) + goto cleanup; + + ASSERT_FALSE(libbpf_log_error, "libbpf_log_error"); + + /* there should be no prog loading log because we specified per-prog log buf */ + ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), 
"unexp_libbpf_log"); + ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"), + "libbpf_log_not_empty"); + ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); + ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), + "good_log_verbose"); + ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"), + "bad_log_not_empty"); + + if (env.verbosity > VERBOSE_NONE) { + printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf); + printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf); + printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf); + printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf); + } + + /* reset everything */ + test_log_buf__destroy(skel); + obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0'; + libbpf_log_buf[0] = '\0'; + libbpf_log_pos = 0; + libbpf_log_error = false; + + /* In the second round we let bad_prog's failure be logged through print callback */ + opts.kernel_log_buf = NULL; /* let everything through into print callback */ + opts.kernel_log_size = 0; + opts.kernel_log_level = 1; + + skel = test_log_buf__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + /* set normal verbose level for good_prog to check log_level is taken into account */ + bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz); + bpf_program__set_log_level(skel->progs.good_prog, 1); + + err = test_log_buf__load(skel); + if (!ASSERT_ERR(err, "unexpected_load_success")) + goto cleanup; + + ASSERT_FALSE(libbpf_log_error, "libbpf_log_error"); + + /* this time prog loading error should be logged through print callback */ + ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"), + "libbpf_log_correct"); + ASSERT_STREQ(obj_log_buf, "", "obj_log__empty"); + ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n", + "good_log_ok"); + ASSERT_STREQ(bad_log_buf, "", "bad_log_empty"); + + if (env.verbosity > VERBOSE_NONE) { + printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf); + printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf); + printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf); + printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf); + } + +cleanup: + free(obj_log_buf); + test_log_buf__destroy(skel); + libbpf_set_print(old_print_cb); +} + +static void bpf_prog_load_log_buf(void) +{ + const struct bpf_insn good_prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn); + const struct bpf_insn bad_prog_insns[] = { + BPF_EXIT_INSN(), + }; + size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + char *log_buf; + int fd = -1; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + return; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + /* with log_level == 0 log_buf shoud stay empty for good prog */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", + good_prog_insns, good_prog_insn_cnt, &opts); + ASSERT_STREQ(log_buf, "", "good_log_0"); + 
ASSERT_GE(fd, 0, "good_fd1"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 2 should always fill log_buf, even for good prog */ + log_buf[0] = '\0'; + opts.log_level = 2; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", + good_prog_insns, good_prog_insn_cnt, &opts); + ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2"); + ASSERT_GE(fd, 0, "good_fd2"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 0 should fill log_buf for bad prog */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL", + bad_prog_insns, bad_prog_insn_cnt, &opts); + ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0"); + ASSERT_LT(fd, 0, "bad_fd"); + if (fd >= 0) + close(fd); + fd = -1; + + free(log_buf); +} + +static void bpf_btf_load_log_buf(void) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + const void *raw_btf_data; + __u32 raw_btf_size; + struct btf *btf; + char *log_buf = NULL; + int fd = -1; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_btf")) + return; + + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good")) + goto cleanup; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + goto cleanup; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + /* with log_level == 0, log_buf should stay empty for good BTF */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + ASSERT_STREQ(log_buf, "", "good_log_0"); + ASSERT_GE(fd, 0, "good_fd1"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 2 should always fill log_buf, even for good BTF */ + log_buf[0] = '\0'; + opts.log_level = 2; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + printf("LOG_BUF: %s\n", log_buf); + ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2"); + ASSERT_GE(fd, 0, "good_fd2"); + if (fd >= 0) + close(fd); + fd = -1; + + /* make BTF bad, add pointer pointing to non-existing type */ + ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad")) + goto cleanup; + + /* log_level == 0 should fill log_buf for bad BTF */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + printf("LOG_BUF: %s\n", log_buf); + ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0"); + ASSERT_LT(fd, 0, "bad_fd"); + if (fd >= 0) + close(fd); + fd = -1; + +cleanup: + free(log_buf); + btf__free(btf); +} + +void test_log_buf(void) +{ + if (test__start_subtest("obj_load_log_buf")) + obj_load_log_buf(); + if (test__start_subtest("bpf_prog_load_log_buf")) + bpf_prog_load_log_buf(); + if (test__start_subtest("bpf_btf_load_log_buf")) + bpf_btf_load_log_buf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c index 4972f92205c7..273725504f11 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_ptr.c +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -4,31 +4,29 @@ #include <test_progs.h> #include <network_helpers.h> -#include "map_ptr_kern.skel.h" +#include "map_ptr_kern.lskel.h" void test_map_ptr(void) { - struct map_ptr_kern *skel; + struct map_ptr_kern_lskel *skel; __u32 duration = 0, retval; char buf[128]; int err; int
page_size = getpagesize(); - skel = map_ptr_kern__open(); + skel = map_ptr_kern_lskel__open(); if (!ASSERT_OK_PTR(skel, "skel_open")) return; - err = bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size); - if (!ASSERT_OK(err, "bpf_map__set_max_entries")) - goto cleanup; + skel->maps.m_ringbuf.max_entries = page_size; - err = map_ptr_kern__load(skel); + err = map_ptr_kern_lskel__load(skel); if (!ASSERT_OK(err, "skel_load")) goto cleanup; skel->bss->page_size = page_size; - err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4, + err = bpf_prog_test_run(skel->progs.cg_skb.prog_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, NULL, &retval, NULL); if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno)) @@ -39,5 +37,5 @@ void test_map_ptr(void) goto cleanup; cleanup: - map_ptr_kern__destroy(skel); + map_ptr_kern_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c index d4b953ae3407..31c09ba577eb 100644 --- a/tools/testing/selftests/bpf/prog_tests/pinning.c +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -241,8 +241,8 @@ void test_pinning(void) goto out; } - map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32), - sizeof(__u64), 1, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(__u32), + sizeof(__u64), 1, NULL); if (CHECK(map_fd < 0, "create pinmap manually", "fd %d\n", map_fd)) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/prog_array_init.c b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c new file mode 100644 index 000000000000..fc4657619739 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include <test_progs.h> +#include "test_prog_array_init.skel.h" + +void test_prog_array_init(void) +{ + struct test_prog_array_init *skel; + int err; + + skel = test_prog_array_init__open(); + if (!ASSERT_OK_PTR(skel, "could not open BPF object")) + return; + + skel->rodata->my_pid = getpid(); + + err = test_prog_array_init__load(skel); + if (!ASSERT_OK(err, "could not load BPF object")) + goto cleanup; + + skel->links.entry = bpf_program__attach_raw_tracepoint(skel->progs.entry, "sys_enter"); + if (!ASSERT_OK_PTR(skel->links.entry, "could not attach BPF program")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->value, 42, "unexpected value"); + +cleanup: + test_prog_array_init__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index 8ccba3ab70ee..b9822f914eeb 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -14,7 +14,7 @@ static void test_queue_stack_map_by_type(int type) int i, err, prog_fd, map_in_fd, map_out_fd; char file[32], buf[128]; struct bpf_object *obj; - struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + struct iphdr iph; /* Fill test values to be used */ for (i = 0; i < MAP_SIZE; i++) @@ -60,15 +60,17 @@ static void test_queue_stack_map_by_type(int type) err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); - if (err || retval || size != sizeof(pkt_v4) || - iph->daddr != val) + if (err || retval || size != sizeof(pkt_v4)) + break; + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); + if (iph.daddr != val) break; } - CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr 
!= val, + CHECK(err || retval || size != sizeof(pkt_v4) || iph.daddr != val, "bpf_map_pop_elem", "err %d errno %d retval %d size %d iph->daddr %u\n", - err, errno, retval, size, iph->daddr); + err, errno, retval, size, iph.daddr); /* Queue is empty, program should return TC_ACT_SHOT */ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c index 167cd8a2edfd..e945195b24c9 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c @@ -62,8 +62,8 @@ void test_ringbuf_multi(void) if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n")) goto cleanup; - proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, page_size, 0); - if (CHECK(proto_fd < 0, "bpf_create_map", "bpf_create_map failed\n")) + proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL); + if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n")) goto cleanup; err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index 3cfc910ab3c1..980ac0f2c0bb 100644 --- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -66,29 +66,20 @@ static union sa46 { static int create_maps(enum bpf_map_type inner_type) { - struct bpf_create_map_attr attr = {}; + LIBBPF_OPTS(bpf_map_create_opts, opts); inner_map_type = inner_type; /* Creating reuseport_array */ - attr.name = "reuseport_array"; - attr.map_type = inner_type; - attr.key_size = sizeof(__u32); - attr.value_size = sizeof(__u32); - attr.max_entries = REUSEPORT_ARRAY_SIZE; - - reuseport_array = bpf_create_map_xattr(&attr); + reuseport_array = bpf_map_create(inner_type, "reuseport_array", + sizeof(__u32), sizeof(__u32), REUSEPORT_ARRAY_SIZE, NULL); RET_ERR(reuseport_array < 0, "creating reuseport_array", "reuseport_array:%d errno:%d\n", reuseport_array, errno); /* Creating outer_map */ - attr.name = "outer_map"; - attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS; - attr.key_size = sizeof(__u32); - attr.value_size = sizeof(__u32); - attr.max_entries = 1; - attr.inner_map_fd = reuseport_array; - outer_map = bpf_create_map_xattr(&attr); + opts.inner_map_fd = reuseport_array; + outer_map = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer_map", + sizeof(__u32), sizeof(__u32), 1, &opts); RET_ERR(outer_map < 0, "creating outer_map", "outer_map:%d errno:%d\n", outer_map, errno); diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 1352ec104149..85db0f4cdd95 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -91,9 +91,9 @@ static void test_sockmap_create_update_free(enum bpf_map_type map_type) if (CHECK_FAIL(s < 0)) return; - map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); + map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL); if (CHECK_FAIL(map < 0)) { - perror("bpf_create_map"); + perror("bpf_map_create"); goto out; } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 7a0d64fdc192..af293ea1542c 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ 
b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -97,7 +97,7 @@ static void run_tests(int family, enum bpf_map_type map_type) char test_name[MAX_TEST_NAME]; int map; - map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); + map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL); if (CHECK_FAIL(map < 0)) { perror("bpf_map_create"); return; } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 2a9cb951bfd6..7e21bfab6358 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -502,8 +502,8 @@ static void test_lookup_32_bit_value(int family, int sotype, int mapfd) if (s < 0) return; - mapfd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(key), - sizeof(value32), 1, 0); + mapfd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(key), + sizeof(value32), 1, NULL); if (mapfd < 0) { FAIL_ERRNO("map_create"); goto close; } diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 86f97681ad89..6a953f4adfdc 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -167,20 +167,20 @@ static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title) static void run_test(int cgroup_fd) { - struct bpf_prog_load_attr attr = { - .file = "./sockopt_inherit.o", - }; int server_fd = -1, client_fd; struct bpf_object *obj; void *server_err; pthread_t tid; - int ignored; int err; - err = bpf_prog_load_xattr(&attr, &obj, &ignored); - if (CHECK_FAIL(err)) + obj = bpf_object__open_file("sockopt_inherit.o", NULL); + if (!ASSERT_OK_PTR(obj, "obj_open")) return; + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + goto close_bpf_object; + err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt"); if (CHECK_FAIL(err)) goto close_bpf_object; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index bc34f7773444..abce12ddcc37 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -297,14 +297,10 @@ detach: void test_sockopt_multi(void) { - struct bpf_prog_load_attr attr = { - .file = "./sockopt_multi.o", - }; int cg_parent = -1, cg_child = -1; struct bpf_object *obj = NULL; int sock_fd = -1; int err = -1; - int ignored; cg_parent = test__join_cgroup("/parent"); if (CHECK_FAIL(cg_parent < 0)) @@ -314,8 +310,12 @@ void test_sockopt_multi(void) if (CHECK_FAIL(cg_child < 0)) goto out; - err = bpf_prog_load_xattr(&attr, &obj, &ignored); - if (CHECK_FAIL(err)) + obj = bpf_object__open_file("sockopt_multi.o", NULL); + if (!ASSERT_OK_PTR(obj, "obj_open")) + goto out; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) goto out; sock_fd = socket(AF_INET, SOCK_STREAM, 0); diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index 265b4fe33ec3..96ff2c20af81 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -2,6 +2,7 @@ #include <test_progs.h> #include "cgroup_helpers.h" #include "network_helpers.h" +#include "tcp_rtt.skel.h" struct tcp_rtt_storage { __u32 invoked; @@ -91,26 +92,18 @@ static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked, static int
run_test(int cgroup_fd, int server_fd) { - struct bpf_prog_load_attr attr = { - .prog_type = BPF_PROG_TYPE_SOCK_OPS, - .file = "./tcp_rtt.o", - .expected_attach_type = BPF_CGROUP_SOCK_OPS, - }; - struct bpf_object *obj; - struct bpf_map *map; + struct tcp_rtt *skel; int client_fd; int prog_fd; int map_fd; int err; - err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); - if (err) { - log_err("Failed to load BPF object"); + skel = tcp_rtt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_load")) return -1; - } - map = bpf_object__next_map(obj, NULL); - map_fd = bpf_map__fd(map); + map_fd = bpf_map__fd(skel->maps.socket_storage_map); + prog_fd = bpf_program__fd(skel->progs._sockops); err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); if (err) { @@ -149,7 +142,7 @@ close_client_fd: close(client_fd); close_bpf_object: - bpf_object__close(obj); + tcp_rtt__destroy(skel); return err; } diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c index d29ebfeef9c5..214d9f4a94a5 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c @@ -19,11 +19,13 @@ static int read_iter(char *file) fd = open(file, 0); if (fd < 0) return -1; - while ((len = read(fd, buf, sizeof(buf))) > 0) + while ((len = read(fd, buf, sizeof(buf))) > 0) { + buf[sizeof(buf) - 1] = '\0'; if (strstr(buf, "iter")) { close(fd); return 0; } + } close(fd); return -1; } @@ -80,7 +82,7 @@ static int fn(void) if (!ASSERT_OK(err, "creating " TDIR "/fs1/b")) goto out; - map = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0); + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL); if (!ASSERT_GT(map, 0, "create_map(ARRAY)")) goto out; err = bpf_obj_pin(map, TDIR "/fs1/c"); diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index 7e13129f593a..509e21d5cb9d 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -30,17 +30,29 @@ extern int extra_prog_load_log_flags; static int check_load(const char *file) { - struct bpf_prog_load_attr attr; struct bpf_object *obj = NULL; - int err, prog_fd; + struct bpf_program *prog; + int err; - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = file; - attr.prog_type = BPF_PROG_TYPE_UNSPEC; - attr.log_level = extra_prog_load_log_flags; - attr.prog_flags = BPF_F_TEST_RND_HI32; found = false; - err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); + + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (err) + return err; + + prog = bpf_object__next_program(obj, NULL); + if (!prog) { + err = -ENOENT; + goto err_out; + } + + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + bpf_program__set_log_level(prog, extra_prog_load_log_flags); + + err = bpf_object__load(obj); + +err_out: bpf_object__close(obj); return err; } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index 7a7ef9d4e151..ac65456b7ab8 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -11,8 +11,8 @@ void test_xdp(void) const char *file = "./test_xdp.o"; struct bpf_object *obj; char buf[128]; - struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); - struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + struct ipv6hdr iph6; + struct iphdr iph; __u32 duration, retval, size; 
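This xdp.c hunk, like the queue_stack_map one earlier, replaces pointer casts into the raw output buffer with memcpy() into properly aligned locals: `buf + sizeof(struct ethhdr)` is 14 bytes in, so dereferencing a `struct iphdr *` there is a misaligned access. The shape of the fix as a standalone sketch; `frame_is_ipip()` is an illustrative name::

    #include <string.h>
    #include <linux/if_ether.h>  /* struct ethhdr */
    #include <linux/ip.h>        /* struct iphdr */
    #include <linux/in.h>        /* IPPROTO_IPIP */

    static int frame_is_ipip(const char *buf)
    {
            struct iphdr iph;

            /* copy instead of casting: buf + 14 is not 4-byte aligned */
            memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
            return iph.protocol == IPPROTO_IPIP;
    }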
int err, prog_fd, map_fd; @@ -28,16 +28,17 @@ void test_xdp(void) err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); - + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); CHECK(err || retval != XDP_TX || size != 74 || - iph->protocol != IPPROTO_IPIP, "ipv4", + iph.protocol != IPPROTO_IPIP, "ipv4", "err %d errno %d retval %d size %d\n", err, errno, retval, size); err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), buf, &size, &retval, &duration); + memcpy(&iph6, buf + sizeof(struct ethhdr), sizeof(iph6)); CHECK(err || retval != XDP_TX || size != 114 || - iph6->nexthdr != IPPROTO_IPV6, "ipv6", + iph6.nexthdr != IPPROTO_IPV6, "ipv6", "err %d errno %d retval %d size %d\n", err, errno, retval, size); out: diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index faa22b84f2ee..5e3a26b15ec6 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -218,9 +218,9 @@ static int send_udp_packets(int vary_dst_ip) .h_dest = BOND2_MAC, .h_proto = htons(ETH_P_IP), }; - uint8_t buf[128] = {}; - struct iphdr *iph = (struct iphdr *)(buf + sizeof(eh)); - struct udphdr *uh = (struct udphdr *)(buf + sizeof(eh) + sizeof(*iph)); + struct iphdr iph = {}; + struct udphdr uh = {}; + uint8_t buf[128]; int i, s = -1; int ifindex; @@ -232,17 +232,16 @@ static int send_udp_packets(int vary_dst_ip) if (!ASSERT_GT(ifindex, 0, "get bond1 ifindex")) goto err; - memcpy(buf, &eh, sizeof(eh)); - iph->ihl = 5; - iph->version = 4; - iph->tos = 16; - iph->id = 1; - iph->ttl = 64; - iph->protocol = IPPROTO_UDP; - iph->saddr = 1; - iph->daddr = 2; - iph->tot_len = htons(sizeof(buf) - ETH_HLEN); - iph->check = 0; + iph.ihl = 5; + iph.version = 4; + iph.tos = 16; + iph.id = 1; + iph.ttl = 64; + iph.protocol = IPPROTO_UDP; + iph.saddr = 1; + iph.daddr = 2; + iph.tot_len = htons(sizeof(buf) - ETH_HLEN); + iph.check = 0; for (i = 1; i <= NPACKETS; i++) { int n; @@ -253,10 +252,15 @@ static int send_udp_packets(int vary_dst_ip) }; /* vary the UDP destination port for even distribution with roundrobin/xor modes */ - uh->dest++; + uh.dest++; if (vary_dst_ip) - iph->daddr++; + iph.daddr++; + + /* construct a packet */ + memcpy(buf, &eh, sizeof(eh)); + memcpy(buf + sizeof(eh), &iph, sizeof(iph)); + memcpy(buf + sizeof(eh) + sizeof(iph), &uh, sizeof(uh)); n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&saddr_ll, sizeof(saddr_ll)); if (!ASSERT_EQ(n, sizeof(buf), "sendto")) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c index f99386d1dc4c..c98a897ad692 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c @@ -42,7 +42,7 @@ void test_xdp_bpf2bpf(void) char buf[128]; int err, pkt_fd, map_fd; bool passed = false; - struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + struct iphdr iph; struct iptnl_info value4 = {.family = AF_INET}; struct test_xdp *pkt_skel = NULL; struct test_xdp_bpf2bpf *ftrace_skel = NULL; @@ -93,9 +93,9 @@ void test_xdp_bpf2bpf(void) /* Run test program */ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); - + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); if (CHECK(err || retval != XDP_TX || size != 74 || - iph->protocol != IPPROTO_IPIP, "ipv4", + iph.protocol != IPPROTO_IPIP, "ipv4", "err %d errno %d retval %d size %d\n", 
err, errno, retval, size)) goto out; diff --git a/tools/testing/selftests/bpf/progs/bpf_loop.c b/tools/testing/selftests/bpf/progs/bpf_loop.c new file mode 100644 index 000000000000..12349e4601e8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_loop.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct callback_ctx { + int output; +}; + +/* These should be set by the user program */ +u32 nested_callback_nr_loops; +u32 stop_index = -1; +u32 nr_loops; +int pid; + +/* Making these global variables so that the userspace program + * can verify the output through the skeleton + */ +int nr_loops_returned; +int g_output; +int err; + +static int callback(__u32 index, void *data) +{ + struct callback_ctx *ctx = data; + + if (index >= stop_index) + return 1; + + ctx->output += index; + + return 0; +} + +static int empty_callback(__u32 index, void *data) +{ + return 0; +} + +static int nested_callback2(__u32 index, void *data) +{ + nr_loops_returned += bpf_loop(nested_callback_nr_loops, callback, data, 0); + + return 0; +} + +static int nested_callback1(__u32 index, void *data) +{ + bpf_loop(nested_callback_nr_loops, nested_callback2, data, 0); + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int test_prog(void *ctx) +{ + struct callback_ctx data = {}; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + nr_loops_returned = bpf_loop(nr_loops, callback, &data, 0); + + if (nr_loops_returned < 0) + err = nr_loops_returned; + else + g_output = data.output; + + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int prog_null_ctx(void *ctx) +{ + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + nr_loops_returned = bpf_loop(nr_loops, empty_callback, NULL, 0); + + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int prog_invalid_flags(void *ctx) +{ + struct callback_ctx data = {}; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + err = bpf_loop(nr_loops, callback, &data, 1); + + return 0; +} + +SEC("fentry/__x64_sys_nanosleep") +int prog_nested_calls(void *ctx) +{ + struct callback_ctx data = {}; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + nr_loops_returned = 0; + bpf_loop(nr_loops, nested_callback1, &data, 0); + + g_output = data.output; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c new file mode 100644 index 000000000000..9dafdc244462 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +u32 nr_loops; +long hits; + +static int empty_callback(__u32 index, void *data) +{ + return 0; +} + +SEC("fentry/__x64_sys_getpgid") +int benchmark(void *ctx) +{ + for (int i = 0; i < 1000; i++) { + bpf_loop(nr_loops, empty_callback, NULL, 0); + + __sync_add_and_fetch(&hits, nr_loops); + } + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/core_kern.c b/tools/testing/selftests/bpf/progs/core_kern.c new file mode 100644 index 000000000000..13499cc15c7d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/core_kern.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include 
<bpf/bpf_core_read.h> + +#define ATTR __always_inline +#include "test_jhash.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 256); +} array1 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 256); +} array2 SEC(".maps"); + +static __noinline int randmap(int v, const struct net_device *dev) +{ + struct bpf_map *map = (struct bpf_map *)&array1; + int key = bpf_get_prandom_u32() & 0xff; + int *val; + + if (bpf_get_prandom_u32() & 1) + map = (struct bpf_map *)&array2; + + val = bpf_map_lookup_elem(map, &key); + if (val) + *val = bpf_get_prandom_u32() + v + dev->mtu; + + return 0; +} + +SEC("tp_btf/xdp_devmap_xmit") +int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device + *from_dev, const struct net_device *to_dev, int sent, int drops, + int err) +{ + return randmap(from_dev->ifindex, from_dev); +} + +SEC("fentry/eth_type_trans") +int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, + struct net_device *dev, unsigned short protocol) +{ + return randmap(dev->ifindex + skb->len, dev); +} + +SEC("fexit/eth_type_trans") +int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb, + struct net_device *dev, unsigned short protocol) +{ + return randmap(dev->ifindex + skb->len, dev); +} + +volatile const int never; + +struct __sk_bUfF /* it will not exist in vmlinux */ { + int len; +} __attribute__((preserve_access_index)); + +struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ { + size_t len; +} __attribute__((preserve_access_index)); + +SEC("tc") +int balancer_ingress(struct __sk_buff *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + void *ptr; + int ret = 0, nh_off, i = 0; + + nh_off = 14; + + /* pragma unroll doesn't work on large loops */ +#define C do { \ + ptr = data + i; \ + if (ptr + nh_off > data_end) \ + break; \ + ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \ + if (never) { \ + /* below is dead code with an unresolvable CO-RE relo */ \ + i += ((struct __sk_bUfF *)ctx)->len; \ + /* this CO-RE relo may or may not resolve + * depending on whether bpf_testmod is loaded. 
+ */ \ + i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \ + } \ + } while (0); +#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C; + C30;C30;C30; /* 90 calls */ + return 0; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c index b1b711d9b214..b64df94ec476 100644 --- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -334,9 +334,11 @@ static inline int check_lpm_trie(void) return 1; } +#define INNER_MAX_ENTRIES 1234 + struct inner_map { __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); + __uint(max_entries, INNER_MAX_ENTRIES); __type(key, __u32); __type(value, __u32); } inner_map SEC(".maps"); @@ -348,7 +350,7 @@ struct { __type(value, __u32); __array(values, struct { __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); + __uint(max_entries, INNER_MAX_ENTRIES); __type(key, __u32); __type(value, __u32); }); @@ -360,8 +362,13 @@ static inline int check_array_of_maps(void) { struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps; struct bpf_map *map = (struct bpf_map *)&m_array_of_maps; + struct bpf_array *inner_map; + int key = 0; VERIFY(check_default(&array_of_maps->map, map)); + inner_map = bpf_map_lookup_elem(array_of_maps, &key); + VERIFY(inner_map != 0); + VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES); return 1; } @@ -382,8 +389,13 @@ static inline int check_hash_of_maps(void) { struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps; struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps; + struct bpf_htab *inner_map; + int key = 2; VERIFY(check_default(&hash_of_maps->map, map)); + inner_map = bpf_map_lookup_elem(hash_of_maps, &key); + VERIFY(inner_map != 0); + VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES); return 1; } diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 2fb7adafb6b6..1ed28882daf3 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -159,6 +159,59 @@ struct { __uint(value_size, sizeof(long long) * 127); } stackmap SEC(".maps"); +#ifdef USE_BPF_LOOP +struct process_frame_ctx { + int cur_cpu; + int32_t *symbol_counter; + void *frame_ptr; + FrameData *frame; + PidData *pidData; + Symbol *sym; + Event *event; + bool done; +}; + +#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var)) + +static int process_frame_callback(__u32 i, struct process_frame_ctx *ctx) +{ + int zero = 0; + void *frame_ptr = ctx->frame_ptr; + PidData *pidData = ctx->pidData; + FrameData *frame = ctx->frame; + int32_t *symbol_counter = ctx->symbol_counter; + int cur_cpu = ctx->cur_cpu; + Event *event = ctx->event; + Symbol *sym = ctx->sym; + + if (frame_ptr && get_frame_data(frame_ptr, pidData, frame, sym)) { + int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu; + int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, sym); + + if (!symbol_id) { + bpf_map_update_elem(&symbolmap, sym, &zero, 0); + symbol_id = bpf_map_lookup_elem(&symbolmap, sym); + if (!symbol_id) { + ctx->done = true; + return 1; + } + } + if (*symbol_id == new_symbol_id) + (*symbol_counter)++; + + barrier_var(i); + if (i >= STACK_MAX_LEN) + return 1; + + event->stack[i] = *symbol_id; + + event->stack_len = i + 1; + frame_ptr = frame->f_back; + } + return 0; +} +#endif /* USE_BPF_LOOP */ + #ifdef GLOBAL_FUNC __noinline #elif defined(SUBPROGS) @@ -228,11 +281,26 @@ 
int __on_event(struct bpf_raw_tracepoint_args *ctx) int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym); if (symbol_counter == NULL) return 0; +#ifdef USE_BPF_LOOP + struct process_frame_ctx ctx = { + .cur_cpu = cur_cpu, + .symbol_counter = symbol_counter, + .frame_ptr = frame_ptr, + .frame = &frame, + .pidData = pidData, + .sym = &sym, + .event = event, + }; + + bpf_loop(STACK_MAX_LEN, process_frame_callback, &ctx, 0); + if (ctx.done) + return 0; +#else #ifdef NO_UNROLL #pragma clang loop unroll(disable) #else #pragma clang loop unroll(full) -#endif +#endif /* NO_UNROLL */ /* Unwind python stack */ for (int i = 0; i < STACK_MAX_LEN; ++i) { if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) { @@ -251,6 +319,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) frame_ptr = frame.f_back; } } +#endif /* USE_BPF_LOOP */ event->stack_complete = frame_ptr == NULL; } else { event->stack_complete = 1; diff --git a/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c new file mode 100644 index 000000000000..5c2059dc01af --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#define STACK_MAX_LEN 600 +#define USE_BPF_LOOP +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 60c93aee2f4a..753718595c26 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -445,6 +445,48 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, return payload; } +#ifdef USE_BPF_LOOP +enum read_type { + READ_INT_VAR, + READ_MAP_VAR, + READ_STR_VAR, +}; + +struct read_var_ctx { + struct strobemeta_payload *data; + void *tls_base; + struct strobemeta_cfg *cfg; + void *payload; + /* value gets mutated */ + struct strobe_value_generic *value; + enum read_type type; +}; + +static int read_var_callback(__u32 index, struct read_var_ctx *ctx) +{ + switch (ctx->type) { + case READ_INT_VAR: + if (index >= STROBE_MAX_INTS) + return 1; + read_int_var(ctx->cfg, index, ctx->tls_base, ctx->value, ctx->data); + break; + case READ_MAP_VAR: + if (index >= STROBE_MAX_MAPS) + return 1; + ctx->payload = read_map_var(ctx->cfg, index, ctx->tls_base, + ctx->value, ctx->data, ctx->payload); + break; + case READ_STR_VAR: + if (index >= STROBE_MAX_STRS) + return 1; + ctx->payload += read_str_var(ctx->cfg, index, ctx->tls_base, + ctx->value, ctx->data, ctx->payload); + break; + } + return 0; +} +#endif /* USE_BPF_LOOP */ + /* * read_strobe_meta returns NULL, if no metadata was read; otherwise returns * pointer to *right after* payload ends @@ -475,11 +517,36 @@ static void *read_strobe_meta(struct task_struct *task, */ tls_base = (void *)task; +#ifdef USE_BPF_LOOP + struct read_var_ctx ctx = { + .cfg = cfg, + .tls_base = tls_base, + .value = &value, + .data = data, + .payload = payload, + }; + int err; + + ctx.type = READ_INT_VAR; + err = bpf_loop(STROBE_MAX_INTS, read_var_callback, &ctx, 0); + if (err != STROBE_MAX_INTS) + return NULL; + + ctx.type = READ_STR_VAR; + err = bpf_loop(STROBE_MAX_STRS, read_var_callback, &ctx, 0); + if (err != STROBE_MAX_STRS) + return NULL; + + ctx.type = READ_MAP_VAR; + err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0); + if (err != STROBE_MAX_MAPS) + return NULL; +#else #ifdef NO_UNROLL #pragma clang loop unroll(disable) #else #pragma unroll -#endif 
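Both the pyperf and strobemeta conversions above follow the same recipe: hoist the loop body into a static callback, pass loop state through a context struct, and let bpf_loop() drive the iteration so the verifier checks the body once instead of once per unrolled iteration. The generic shape, with illustrative names and assuming a libbpf whose helper definitions include bpf_loop()::

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct sum_ctx {
            long sum;
    };

    static int sum_cb(__u32 index, void *data)
    {
            struct sum_ctx *ctx = data;

            ctx->sum += index;
            return 0;       /* non-zero would stop the loop early */
    }

    SEC("fentry/__x64_sys_getpgid")
    int sum_prog(void *ctx)
    {
            struct sum_ctx c = {};

            /* up to 1000 callback invocations; returns how many ran */
            bpf_loop(1000, sum_cb, &c, 0);
            return 0;
    }

    char _license[] SEC("license") = "GPL";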
+#endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_INTS; ++i) { read_int_var(cfg, i, tls_base, &value, data); } @@ -487,7 +554,7 @@ static void *read_strobe_meta(struct task_struct *task, #pragma clang loop unroll(disable) #else #pragma unroll -#endif +#endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_STRS; ++i) { payload += read_str_var(cfg, i, tls_base, &value, data, payload); } @@ -495,10 +562,12 @@ static void *read_strobe_meta(struct task_struct *task, #pragma clang loop unroll(disable) #else #pragma unroll -#endif +#endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_MAPS; ++i) { payload = read_map_var(cfg, i, tls_base, &value, data, payload); } +#endif /* USE_BPF_LOOP */ + /* * return pointer right after end of payload, so it's possible to * calculate exact amount of useful data that needs to be sent diff --git a/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c new file mode 100644 index 000000000000..d18b992f0165 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2021 Facebook */ + +#define STROBE_MAX_INTS 2 +#define STROBE_MAX_STRS 25 +#define STROBE_MAX_MAPS 100 +#define STROBE_MAX_MAP_ENTRIES 20 +#define USE_BPF_LOOP +#include "strobemeta.h" diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c index 8eadbd4caf7a..5f8379aadb29 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c @@ -38,7 +38,7 @@ int pass_handler(const void *ctx) /* tests existing symbols. */ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0); if (rq) - out__existing_typed = 0; + out__existing_typed = rq->cpu; out__existing_typeless = (__u64)&bpf_prog_active; /* tests non-existent symbols. 
*/ diff --git a/tools/testing/selftests/bpf/progs/test_legacy_printk.c b/tools/testing/selftests/bpf/progs/test_legacy_printk.c new file mode 100644 index 000000000000..64c2d9ced529 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_legacy_printk.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <linux/bpf.h> +#define BPF_NO_GLOBAL_DATA +#include <bpf/bpf_helpers.h> + +char LICENSE[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} my_pid_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} res_map SEC(".maps"); + +volatile int my_pid_var = 0; +volatile int res_var = 0; + +SEC("tp/raw_syscalls/sys_enter") +int handle_legacy(void *ctx) +{ + int zero = 0, *my_pid, cur_pid, *my_res; + + my_pid = bpf_map_lookup_elem(&my_pid_map, &zero); + if (!my_pid) + return 1; + + cur_pid = bpf_get_current_pid_tgid() >> 32; + if (cur_pid != *my_pid) + return 1; + + my_res = bpf_map_lookup_elem(&res_map, &zero); + if (!my_res) + return 1; + + if (*my_res == 0) + /* use bpf_printk() in combination with BPF_NO_GLOBAL_DATA to + * force a .rodata.str1.1 section, which previously caused + * problems on old kernels because libbpf always tried to + * create a global data map for it + */ + bpf_printk("Legacy-case bpf_printk test, pid %d\n", cur_pid); + *my_res = 1; + + return *my_res; +} + +SEC("tp/raw_syscalls/sys_enter") +int handle_modern(void *ctx) +{ + int zero = 0, cur_pid; + + cur_pid = bpf_get_current_pid_tgid() >> 32; + if (cur_pid != my_pid_var) + return 1; + + if (res_var == 0) + /* we need bpf_printk() to validate libbpf logic around unused + * global maps and legacy kernels; see comment in handle_legacy() + */ + bpf_printk("Modern-case bpf_printk test, pid %d\n", cur_pid); + res_var = 1; + + return res_var; +} diff --git a/tools/testing/selftests/bpf/progs/test_log_buf.c b/tools/testing/selftests/bpf/progs/test_log_buf.c new file mode 100644 index 000000000000..199f459bd5ae --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_log_buf.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +int a[4]; +const volatile int off = 4000; + +SEC("raw_tp/sys_enter") +int good_prog(const void *ctx) +{ + a[0] = (int)(long)ctx; + return a[1]; +} + +SEC("raw_tp/sys_enter") +int bad_prog(const void *ctx) +{ + /* out of bounds access */ + return a[off]; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_prog_array_init.c b/tools/testing/selftests/bpf/progs/test_prog_array_init.c new file mode 100644 index 000000000000..2cd138356126 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_prog_array_init.c @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +const volatile pid_t my_pid = 0; +int value = 0; + +SEC("raw_tp/sys_enter") +int tailcall_1(void *ctx) +{ + value = 42; + return 0; +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 2); + __uint(key_size, sizeof(__u32)); + __array(values, int (void *)); +} prog_array_init SEC(".maps") = { + .values = { + [1] = (void *)&tailcall_1, + }, +}; + +SEC("raw_tp/sys_enter") +int entry(void *ctx) +{ + pid_t pid = bpf_get_current_pid_tgid() >>
32; + + if (pid != my_pid) + return 0; + + bpf_tail_call(ctx, &prog_array_init, 1); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c index f024154c7be7..f90ffcafd1e8 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale2.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale2.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #define ATTR __always_inline #include "test_jhash.h" -SEC("scale90_inline") +SEC("tc") int balancer_ingress(struct __sk_buff *ctx) { void *data_end = (void *)(long)ctx->data_end; diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c index 9a4d09590b3d..2098f3f27f18 100644 --- a/tools/testing/selftests/bpf/progs/trigger_bench.c +++ b/tools/testing/selftests/bpf/progs/trigger_bench.c @@ -52,3 +52,10 @@ int bench_trigger_fmodret(void *ctx) __sync_add_and_fetch(&hits, 1); return -22; } + +SEC("uprobe/self/uprobe_target") +int bench_trigger_uprobe(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py index be54b7335a76..6bf21e47882a 100755 --- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py +++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py @@ -9,7 +9,15 @@ import os, sys LINUX_ROOT = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir, os.pardir, os.pardir)) -BPFTOOL_DIR = os.path.join(LINUX_ROOT, 'tools/bpf/bpftool') +BPFTOOL_DIR = os.getenv('BPFTOOL_DIR', + os.path.join(LINUX_ROOT, 'tools/bpf/bpftool')) +BPFTOOL_BASHCOMP_DIR = os.getenv('BPFTOOL_BASHCOMP_DIR', + os.path.join(BPFTOOL_DIR, 'bash-completion')) +BPFTOOL_DOC_DIR = os.getenv('BPFTOOL_DOC_DIR', + os.path.join(BPFTOOL_DIR, 'Documentation')) +INCLUDE_DIR = os.getenv('INCLUDE_DIR', + os.path.join(LINUX_ROOT, 'tools/include')) + retval = 0 class BlockParser(object): @@ -242,12 +250,6 @@ class FileExtractor(object): end_marker = re.compile('}\\\\n') return self.__get_description_list(start_marker, pattern, end_marker) - def default_options(self): - """ - Return the default options contained in HELP_SPEC_OPTIONS - """ - return { '-j', '--json', '-p', '--pretty', '-d', '--debug' } - def get_bashcomp_list(self, block_name): """ Search for and parse a list of type names from a variable in bash @@ -274,7 +276,56 @@ class SourceFileExtractor(FileExtractor): defined in children classes. 
""" def get_options(self): - return self.default_options().union(self.get_help_list_macro('HELP_SPEC_OPTIONS')) + return self.get_help_list_macro('HELP_SPEC_OPTIONS') + +class MainHeaderFileExtractor(SourceFileExtractor): + """ + An extractor for bpftool's main.h + """ + filename = os.path.join(BPFTOOL_DIR, 'main.h') + + def get_common_options(self): + """ + Parse the list of common options in main.h (options that apply to all + commands), which looks to the lists of options in other source files + but has different start and end markers: + + "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}" + + Return a set containing all options, such as: + + {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'} + """ + start_marker = re.compile(f'"OPTIONS :=') + pattern = re.compile('([\w-]+) ?(?:\||}[ }\]"])') + end_marker = re.compile('#define') + + parser = InlineListParser(self.reader) + parser.search_block(start_marker) + return parser.parse(pattern, end_marker) + +class ManSubstitutionsExtractor(SourceFileExtractor): + """ + An extractor for substitutions.rst + """ + filename = os.path.join(BPFTOOL_DOC_DIR, 'substitutions.rst') + + def get_common_options(self): + """ + Parse the list of common options in substitutions.rst (options that + apply to all commands). + + Return a set containing all options, such as: + + {'-p', '-d', '--legacy', '--pretty', '--debug', '--json', '-l', '-j'} + """ + start_marker = re.compile('\|COMMON_OPTIONS\| replace:: {') + pattern = re.compile('\*\*([\w/-]+)\*\*') + end_marker = re.compile('}$') + + parser = InlineListParser(self.reader) + parser.search_block(start_marker) + return parser.parse(pattern, end_marker) class ProgFileExtractor(SourceFileExtractor): """ @@ -350,7 +401,7 @@ class BpfHeaderExtractor(FileExtractor): """ An extractor for the UAPI BPF header. """ - filename = os.path.join(LINUX_ROOT, 'tools/include/uapi/linux/bpf.h') + filename = os.path.join(INCLUDE_DIR, 'uapi/linux/bpf.h') def get_prog_types(self): return self.get_enum('bpf_prog_type') @@ -374,7 +425,7 @@ class ManProgExtractor(ManPageExtractor): """ An extractor for bpftool-prog.rst. """ - filename = os.path.join(BPFTOOL_DIR, 'Documentation/bpftool-prog.rst') + filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-prog.rst') def get_attach_types(self): return self.get_rst_list('ATTACH_TYPE') @@ -383,7 +434,7 @@ class ManMapExtractor(ManPageExtractor): """ An extractor for bpftool-map.rst. """ - filename = os.path.join(BPFTOOL_DIR, 'Documentation/bpftool-map.rst') + filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-map.rst') def get_map_types(self): return self.get_rst_list('TYPE') @@ -392,7 +443,7 @@ class ManCgroupExtractor(ManPageExtractor): """ An extractor for bpftool-cgroup.rst. """ - filename = os.path.join(BPFTOOL_DIR, 'Documentation/bpftool-cgroup.rst') + filename = os.path.join(BPFTOOL_DOC_DIR, 'bpftool-cgroup.rst') def get_attach_types(self): return self.get_rst_list('ATTACH_TYPE') @@ -411,7 +462,7 @@ class BashcompExtractor(FileExtractor): """ An extractor for bpftool's bash completion file. 
""" - filename = os.path.join(BPFTOOL_DIR, 'bash-completion/bpftool') + filename = os.path.join(BPFTOOL_BASHCOMP_DIR, 'bpftool') def get_prog_attach_types(self): return self.get_bashcomp_list('BPFTOOL_PROG_ATTACH_TYPES') @@ -562,7 +613,7 @@ def main(): help_cmd_options = source_info.get_options() source_info.close() - man_cmd_info = ManGenericExtractor(os.path.join('Documentation', 'bpftool-' + cmd + '.rst')) + man_cmd_info = ManGenericExtractor(os.path.join(BPFTOOL_DOC_DIR, 'bpftool-' + cmd + '.rst')) man_cmd_options = man_cmd_info.get_options() man_cmd_info.close() @@ -573,13 +624,26 @@ def main(): help_main_options = source_main_info.get_options() source_main_info.close() - man_main_info = ManGenericExtractor(os.path.join('Documentation', 'bpftool.rst')) + man_main_info = ManGenericExtractor(os.path.join(BPFTOOL_DOC_DIR, 'bpftool.rst')) man_main_options = man_main_info.get_options() man_main_info.close() verify(help_main_options, man_main_options, f'Comparing {source_main_info.filename} (do_help() OPTIONS) and {man_main_info.filename} (OPTIONS):') + # Compare common options (options that apply to all commands) + + main_hdr_info = MainHeaderFileExtractor() + source_common_options = main_hdr_info.get_common_options() + main_hdr_info.close() + + man_substitutions = ManSubstitutionsExtractor() + man_common_options = man_substitutions.get_common_options() + man_substitutions.close() + + verify(source_common_options, man_common_options, + f'Comparing common options from {main_hdr_info.filename} (HELP_SPEC_OPTIONS) and {man_substitutions.filename}:') + sys.exit(retval) if __name__ == "__main__": diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index a63787e7bb1a..5b8314cd77fd 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -51,15 +51,15 @@ int main(int argc, char **argv) goto err; } - map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key), - sizeof(value), 0, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key), + sizeof(value), 0, NULL); if (map_fd < 0) { printf("Failed to create map: %s\n", strerror(errno)); goto out; } - percpu_map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, - sizeof(key), sizeof(value), 0, 0); + percpu_map_fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL, + sizeof(key), sizeof(value), 0, NULL); if (percpu_map_fd < 0) { printf("Failed to create map: %s\n", strerror(errno)); goto out; diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 006be3963977..baa3e3ecae82 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -208,6 +208,7 @@ static void test_lpm_order(void) static void test_lpm_map(int keysize) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); size_t i, j, n_matches, n_matches_after_delete, n_nodes, n_lookups; struct tlpm_node *t, *list = NULL; struct bpf_lpm_trie_key *key; @@ -233,11 +234,11 @@ static void test_lpm_map(int keysize) key = alloca(sizeof(*key) + keysize); memset(key, 0, sizeof(*key) + keysize); - map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, sizeof(*key) + keysize, keysize + 1, 4096, - BPF_F_NO_PREALLOC); + &opts); assert(map >= 0); for (i = 0; i < n_nodes; ++i) { @@ -329,6 +330,7 @@ static void test_lpm_map(int keysize) static void test_lpm_ipaddr(void) { + 
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct bpf_lpm_trie_key *key_ipv4; struct bpf_lpm_trie_key *key_ipv6; size_t key_size_ipv4; @@ -342,14 +344,14 @@ static void test_lpm_ipaddr(void) key_ipv4 = alloca(key_size_ipv4); key_ipv6 = alloca(key_size_ipv6); - map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size_ipv4, sizeof(value), - 100, BPF_F_NO_PREALLOC); + 100, &opts); assert(map_fd_ipv4 >= 0); - map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size_ipv6, sizeof(value), - 100, BPF_F_NO_PREALLOC); + 100, &opts); assert(map_fd_ipv6 >= 0); /* Fill data some IPv4 and IPv6 address ranges */ @@ -423,6 +425,7 @@ static void test_lpm_ipaddr(void) static void test_lpm_delete(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct bpf_lpm_trie_key *key; size_t key_size; int map_fd; @@ -431,9 +434,9 @@ static void test_lpm_delete(void) key_size = sizeof(*key) + sizeof(__u32); key = alloca(key_size); - map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), - 100, BPF_F_NO_PREALLOC); + 100, &opts); assert(map_fd >= 0); /* Add nodes: @@ -535,6 +538,7 @@ static void test_lpm_delete(void) static void test_lpm_get_next_key(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct bpf_lpm_trie_key *key_p, *next_key_p; size_t key_size; __u32 value = 0; @@ -544,8 +548,7 @@ static void test_lpm_get_next_key(void) key_p = alloca(key_size); next_key_p = alloca(key_size); - map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, sizeof(value), - 100, BPF_F_NO_PREALLOC); + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), 100, &opts); assert(map_fd >= 0); /* empty tree. 
get_next_key should return ENOENT */ @@ -753,6 +756,7 @@ static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd) static void test_lpm_multi_thread(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); struct lpm_mt_test_info info[4]; size_t key_size, value_size; pthread_t thread_id[4]; @@ -762,8 +766,7 @@ static void test_lpm_multi_thread(void) /* create a trie */ value_size = sizeof(__u32); key_size = sizeof(struct bpf_lpm_trie_key) + value_size; - map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, value_size, - 100, BPF_F_NO_PREALLOC); + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts); /* create 4 threads to test update, delete, lookup and get_next_key */ setup_lpm_mt_test_info(&info[0], map_fd); diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 7f3d1d8460b4..b9f1bbbc8aba 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -28,13 +28,14 @@ static int nr_cpus; static int create_map(int map_type, int map_flags, unsigned int size) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); int map_fd; - map_fd = bpf_create_map(map_type, sizeof(unsigned long long), - sizeof(unsigned long long), size, map_flags); + map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long), + sizeof(unsigned long long), size, &opts); if (map_fd == -1) - perror("bpf_create_map"); + perror("bpf_map_create"); return map_fd; } @@ -42,7 +43,6 @@ static int create_map(int map_type, int map_flags, unsigned int size) static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key, void *value) { - struct bpf_create_map_attr map; struct bpf_insn insns[] = { BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0), BPF_LD_MAP_FD(BPF_REG_1, fd), @@ -63,13 +63,7 @@ static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key, int mfd, pfd, ret, zero = 0; __u32 retval = 0; - memset(&map, 0, sizeof(map)); - map.map_type = BPF_MAP_TYPE_ARRAY; - map.key_size = sizeof(int); - map.value_size = sizeof(unsigned long long); - map.max_entries = 1; - - mfd = bpf_create_map_xattr(&map); + mfd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(__u64), 1, NULL); if (mfd < 0) return -1; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 8b31bc1a801d..f4cd658bbe00 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -33,15 +33,14 @@ static int skips; -static int map_flags; +static struct bpf_map_create_opts map_opts = { .sz = sizeof(map_opts) }; static void test_hashmap(unsigned int task, void *data) { long long key, next_key, first_key, value; int fd; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - 2, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), 2, &map_opts); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); @@ -138,8 +137,7 @@ static void test_hashmap_sizes(unsigned int task, void *data) for (i = 1; i <= 512; i <<= 1) for (j = 1; j <= 1 << 18; j <<= 1) { - fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, - 2, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, i, j, 2, &map_opts); if (fd < 0) { if (errno == ENOMEM) return; @@ -160,8 +158,8 @@ static void test_hashmap_percpu(unsigned int task, void *data) int expected_key_mask = 0; int fd, i; - fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), - 
sizeof(bpf_percpu(value, 0)), 2, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, NULL, sizeof(key), + sizeof(bpf_percpu(value, 0)), 2, &map_opts); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); @@ -272,11 +270,11 @@ static int helper_fill_hashmap(int max_entries) int i, fd, ret; long long key, value; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - max_entries, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + max_entries, &map_opts); CHECK(fd < 0, "failed to create hashmap", - "err: %s, flags: 0x%x\n", strerror(errno), map_flags); + "err: %s, flags: 0x%x\n", strerror(errno), map_opts.map_flags); for (i = 0; i < max_entries; i++) { key = i; value = key; @@ -332,8 +330,8 @@ static void test_hashmap_zero_seed(void) int i, first, second, old_flags; long long key, next_first, next_second; - old_flags = map_flags; - map_flags |= BPF_F_ZERO_SEED; + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_ZERO_SEED; first = helper_fill_hashmap(3); second = helper_fill_hashmap(3); @@ -355,7 +353,7 @@ static void test_hashmap_zero_seed(void) key = next_first; } - map_flags = old_flags; + map_opts.map_flags = old_flags; close(first); close(second); } @@ -365,8 +363,7 @@ static void test_arraymap(unsigned int task, void *data) int key, next_key, fd; long long value; - fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(value), 2, NULL); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); exit(1); @@ -421,8 +418,8 @@ static void test_arraymap_percpu(unsigned int task, void *data) BPF_DECLARE_PERCPU(long, values); int key, next_key, fd, i; - fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(bpf_percpu(values, 0)), 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key), + sizeof(bpf_percpu(values, 0)), 2, NULL); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); exit(1); @@ -484,8 +481,8 @@ static void test_arraymap_percpu_many_keys(void) unsigned int nr_keys = 2000; int key, fd, i; - fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(bpf_percpu(values, 0)), nr_keys, 0); + fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, sizeof(key), + sizeof(bpf_percpu(values, 0)), nr_keys, NULL); if (fd < 0) { printf("Failed to create per-cpu arraymap '%s'!\n", strerror(errno)); @@ -516,8 +513,7 @@ static void test_devmap(unsigned int task, void *data) int fd; __u32 key, value; - fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(key), sizeof(value), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, NULL, sizeof(key), sizeof(value), 2, NULL); if (fd < 0) { printf("Failed to create devmap '%s'!\n", strerror(errno)); exit(1); @@ -531,8 +527,7 @@ static void test_devmap_hash(unsigned int task, void *data) int fd; __u32 key, value; - fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP_HASH, sizeof(key), sizeof(value), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, NULL, sizeof(key), sizeof(value), 2, NULL); if (fd < 0) { printf("Failed to create devmap_hash '%s'!\n", strerror(errno)); exit(1); @@ -552,14 +547,12 @@ static void test_queuemap(unsigned int task, void *data) vals[i] = rand(); /* Invalid key size */ - fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 4, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 4, sizeof(val), MAP_SIZE, &map_opts); assert(fd < 0 && errno == EINVAL); - 
fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, sizeof(val), MAP_SIZE, &map_opts); /* Queue map does not support BPF_F_NO_PREALLOC */ - if (map_flags & BPF_F_NO_PREALLOC) { + if (map_opts.map_flags & BPF_F_NO_PREALLOC) { assert(fd < 0 && errno == EINVAL); return; } @@ -610,14 +603,12 @@ static void test_stackmap(unsigned int task, void *data) vals[i] = rand(); /* Invalid key size */ - fd = bpf_create_map(BPF_MAP_TYPE_STACK, 4, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 4, sizeof(val), MAP_SIZE, &map_opts); assert(fd < 0 && errno == EINVAL); - fd = bpf_create_map(BPF_MAP_TYPE_STACK, 0, sizeof(val), MAP_SIZE, - map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_STACK, NULL, 0, sizeof(val), MAP_SIZE, &map_opts); /* Stack map does not support BPF_F_NO_PREALLOC */ - if (map_flags & BPF_F_NO_PREALLOC) { + if (map_opts.map_flags & BPF_F_NO_PREALLOC) { assert(fd < 0 && errno == EINVAL); return; } @@ -744,9 +735,9 @@ static void test_sockmap(unsigned int tasks, void *data) } /* Test sockmap with connected sockets */ - fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, + fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(key), sizeof(value), - 6, 0); + 6, NULL); if (fd < 0) { if (!bpf_probe_map_type(BPF_MAP_TYPE_SOCKMAP, 0)) { printf("%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\n", @@ -1168,8 +1159,7 @@ static void test_map_in_map(void) obj = bpf_object__open(MAPINMAP_PROG); - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), sizeof(int), - 2, 0); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int), sizeof(int), 2, NULL); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); @@ -1315,8 +1305,8 @@ static void test_map_large(void) } key; int fd, i, value; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); if (fd < 0) { printf("Failed to create large map '%s'!\n", strerror(errno)); exit(1); @@ -1469,8 +1459,8 @@ static void test_map_parallel(void) int i, fd, key = 0, value = 0; int data[2]; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags); + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); if (fd < 0) { printf("Failed to create map for parallel test '%s'!\n", strerror(errno)); @@ -1518,9 +1508,13 @@ static void test_map_parallel(void) static void test_map_rdonly(void) { int fd, key = 0, value = 0; + __u32 old_flags; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags | BPF_F_RDONLY); + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_RDONLY; + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); + map_opts.map_flags = old_flags; if (fd < 0) { printf("Failed to create map for read only test '%s'!\n", strerror(errno)); @@ -1543,9 +1537,13 @@ static void test_map_rdonly(void) static void test_map_wronly_hash(void) { int fd, key = 0, value = 0; + __u32 old_flags; - fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), - MAP_SIZE, map_flags | BPF_F_WRONLY); + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_WRONLY; + fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(key), sizeof(value), + MAP_SIZE, &map_opts); + map_opts.map_flags = old_flags; if (fd < 0) { printf("Failed to create map for write 
only test '%s'!\n", strerror(errno)); @@ -1567,13 +1565,17 @@ static void test_map_wronly_hash(void) static void test_map_wronly_stack_or_queue(enum bpf_map_type map_type) { int fd, value = 0; + __u32 old_flags; + assert(map_type == BPF_MAP_TYPE_QUEUE || map_type == BPF_MAP_TYPE_STACK); - fd = bpf_create_map(map_type, 0, sizeof(value), MAP_SIZE, - map_flags | BPF_F_WRONLY); + old_flags = map_opts.map_flags; + map_opts.map_flags |= BPF_F_WRONLY; + fd = bpf_map_create(map_type, NULL, 0, sizeof(value), MAP_SIZE, &map_opts); + map_opts.map_flags = old_flags; /* Stack/Queue maps do not support BPF_F_NO_PREALLOC */ - if (map_flags & BPF_F_NO_PREALLOC) { + if (map_opts.map_flags & BPF_F_NO_PREALLOC) { assert(fd < 0 && errno == EINVAL); return; } @@ -1700,8 +1702,8 @@ static void test_reuseport_array(void) __u32 fds_idx = 0; int fd; - map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, - sizeof(__u32), sizeof(__u64), array_size, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL, + sizeof(__u32), sizeof(__u64), array_size, NULL); CHECK(map_fd < 0, "reuseport array create", "map_fd:%d, errno:%d\n", map_fd, errno); @@ -1837,8 +1839,8 @@ static void test_reuseport_array(void) close(map_fd); /* Test 32 bit fd */ - map_fd = bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, - sizeof(__u32), sizeof(__u32), array_size, 0); + map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL, + sizeof(__u32), sizeof(__u32), array_size, NULL); CHECK(map_fd < 0, "reuseport array create", "map_fd:%d, errno:%d\n", map_fd, errno); prepare_reuseport_grp(SOCK_STREAM, map_fd, sizeof(__u32), &fd64, @@ -1896,10 +1898,10 @@ int main(void) libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - map_flags = 0; + map_opts.map_flags = 0; run_all_tests(); - map_flags = BPF_F_NO_PREALLOC; + map_opts.map_flags = BPF_F_NO_PREALLOC; run_all_tests(); #define DEFINE_TEST(name) test_##name(); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index c65986bd9d07..296928948bb9 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -473,11 +473,11 @@ static struct prog_test_def prog_test_defs[] = { #include <prog_tests/tests.h> #undef DEFINE_TEST }; -const int prog_test_cnt = ARRAY_SIZE(prog_test_defs); +static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs); const char *argp_program_version = "test_progs 0.1"; const char *argp_program_bug_address = "<bpf@vger.kernel.org>"; -const char argp_program_doc[] = "BPF selftests test runner"; +static const char argp_program_doc[] = "BPF selftests test runner"; enum ARG_KEYS { ARG_TEST_NUM = 'n', @@ -939,7 +939,7 @@ static void *dispatch_thread(void *ctx) { struct dispatch_data *data = ctx; int sock_fd; - FILE *log_fd = NULL; + FILE *log_fp = NULL; sock_fd = data->sock_fd; @@ -1002,8 +1002,8 @@ static void *dispatch_thread(void *ctx) /* collect all logs */ if (msg_test_done.test_done.have_log) { - log_fd = open_memstream(&result->log_buf, &result->log_cnt); - if (!log_fd) + log_fp = open_memstream(&result->log_buf, &result->log_cnt); + if (!log_fp) goto error; while (true) { @@ -1014,12 +1014,12 @@ static void *dispatch_thread(void *ctx) if (msg_log.type != MSG_TEST_LOG) goto error; - fprintf(log_fd, "%s", msg_log.test_log.log_buf); + fprintf(log_fp, "%s", msg_log.test_log.log_buf); if (msg_log.test_log.is_last) break; } - fclose(log_fd); - log_fd = NULL; + fclose(log_fp); + log_fp = NULL; } /* output log */ { @@ -1045,8 +1045,8 @@ error: if (env.debug) fprintf(stderr, "[%d]: 
Protocol/IO error: %s.\n", data->worker_id, strerror(errno)); - if (log_fd) - fclose(log_fd); + if (log_fp) + fclose(log_fp); done: { struct msg msg_exit; @@ -1198,11 +1198,11 @@ static int server_main(void) env.sub_succ_cnt += result->sub_succ_cnt; } + print_all_error_logs(); + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); - print_all_error_logs(); - /* reap all workers */ for (i = 0; i < env.workers; i++) { int wstatus, pid; @@ -1484,11 +1484,11 @@ int main(int argc, char **argv) if (env.list_test_names) goto out; + print_all_error_logs(); + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); - print_all_error_logs(); - close(env.saved_netns_fd); out: if (!env.list_test_names && env.has_testmod) diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 05c9e4944c01..f0c8d05ba6d1 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -663,23 +663,36 @@ static int load_insns(const struct sock_addr_test *test, static int load_path(const struct sock_addr_test *test, const char *path) { - struct bpf_prog_load_attr attr; struct bpf_object *obj; - int prog_fd; + struct bpf_program *prog; + int err; - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = path; - attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; - attr.expected_attach_type = test->expected_attach_type; - attr.prog_flags = BPF_F_TEST_RND_HI32; + obj = bpf_object__open_file(path, NULL); + err = libbpf_get_error(obj); + if (err) { + log_err(">>> Opening BPF object (%s) error.\n", path); + return -1; + } + + prog = bpf_object__next_program(obj, NULL); + if (!prog) + goto err_out; - if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { + bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR); + bpf_program__set_expected_attach_type(prog, test->expected_attach_type); + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + + err = bpf_object__load(obj); + if (err) { if (test->expected_result != LOAD_REJECT) log_err(">>> Loading program (%s) error.\n", path); - return -1; + goto err_out; } - return prog_fd; + return bpf_program__fd(prog); +err_out: + bpf_object__close(obj); + return -1; } static int bind4_prog_load(const struct sock_addr_test *test) diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index 5c7bea525626..0851c42ee31c 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -185,11 +185,12 @@ static void do_test(uint32_t *tests, int start_insns, int fd_map, int main(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); uint32_t tests = 0; int i, fd_map; - fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), - sizeof(int), 1, BPF_F_NO_PREALLOC); + fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int), + sizeof(int), 1, &opts); assert(fd_map > 0); for (i = 0; i < 5; i++) { diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 9fbef396a716..16ce2ad097f4 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -462,11 +462,11 @@ static int __create_map(uint32_t type, uint32_t size_key, uint32_t size_value, uint32_t max_elem, uint32_t extra_flags) { + LIBBPF_OPTS(bpf_map_create_opts, opts); int fd; - fd = bpf_create_map(type, 
size_key, size_value, max_elem, - (type == BPF_MAP_TYPE_HASH ? - BPF_F_NO_PREALLOC : 0) | extra_flags); + opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags; + fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts); if (fd < 0) { if (skip_unsupported_map(type)) return -1; @@ -522,8 +522,8 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem, { int mfd, p1fd, p2fd, p3fd; - mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), - sizeof(int), max_elem, 0); + mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int), + sizeof(int), max_elem, NULL); if (mfd < 0) { if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY)) return -1; @@ -553,10 +553,11 @@ err: static int create_map_in_map(void) { + LIBBPF_OPTS(bpf_map_create_opts, opts); int inner_map_fd, outer_map_fd; - inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), - sizeof(int), 1, 0); + inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), + sizeof(int), 1, NULL); if (inner_map_fd < 0) { if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY)) return -1; @@ -564,8 +565,9 @@ static int create_map_in_map(void) return inner_map_fd; } - outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, - sizeof(int), inner_map_fd, 1, 0); + opts.inner_map_fd = inner_map_fd; + outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, + sizeof(int), sizeof(int), 1, &opts); if (outer_map_fd < 0) { if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS)) return -1; @@ -584,8 +586,8 @@ static int create_cgroup_storage(bool percpu) BPF_MAP_TYPE_CGROUP_STORAGE; int fd; - fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key), - TEST_DATA_LEN, 0, 0); + fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key), + TEST_DATA_LEN, 0, NULL); if (fd < 0) { if (skip_unsupported_map(type)) return -1; @@ -652,7 +654,7 @@ static int load_btf(void) memcpy(ptr, btf_str_sec, hdr.str_len); ptr += hdr.str_len; - btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0); + btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL); free(raw_btf); if (btf_fd < 0) return -1; @@ -661,22 +663,17 @@ static int load_btf(void) static int create_map_spin_lock(void) { - struct bpf_create_map_attr attr = { - .name = "test_map", - .map_type = BPF_MAP_TYPE_ARRAY, - .key_size = 4, - .value_size = 8, - .max_entries = 1, + LIBBPF_OPTS(bpf_map_create_opts, opts, .btf_key_type_id = 1, .btf_value_type_id = 3, - }; + ); int fd, btf_fd; btf_fd = load_btf(); if (btf_fd < 0) return -1; - attr.btf_fd = btf_fd; - fd = bpf_create_map_xattr(&attr); + opts.btf_fd = btf_fd; + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts); if (fd < 0) printf("Failed to create map with spin_lock\n"); return fd; @@ -684,24 +681,19 @@ static int create_map_spin_lock(void) static int create_sk_storage_map(void) { - struct bpf_create_map_attr attr = { - .name = "test_map", - .map_type = BPF_MAP_TYPE_SK_STORAGE, - .key_size = 4, - .value_size = 8, - .max_entries = 0, + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC, .btf_key_type_id = 1, .btf_value_type_id = 3, - }; + ); int fd, btf_fd; btf_fd = load_btf(); if (btf_fd < 0) return -1; - attr.btf_fd = btf_fd; - fd = bpf_create_map_xattr(&attr); - close(attr.btf_fd); + opts.btf_fd = btf_fd; + fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts); + close(opts.btf_fd); if (fd < 0) printf("Failed to create sk_storage_map\n"); return fd; diff --git a/tools/testing/selftests/bpf/testing_helpers.c 
b/tools/testing/selftests/bpf/testing_helpers.c index 52c2f24e0898..795b6798ccee 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -88,12 +88,15 @@ int extra_prog_load_log_flags = 0; int bpf_prog_test_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd) { - struct bpf_object_load_attr attr = {}; + LIBBPF_OPTS(bpf_object_open_opts, opts, + .kernel_log_level = extra_prog_load_log_flags, + ); struct bpf_object *obj; struct bpf_program *prog; + __u32 flags; int err; - obj = bpf_object__open(file); + obj = bpf_object__open_file(file, &opts); if (!obj) return -errno; @@ -106,11 +109,10 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type, if (type != BPF_PROG_TYPE_UNSPEC) bpf_program__set_type(prog, type); - bpf_program__set_extra_flags(prog, BPF_F_TEST_RND_HI32); + flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32; + bpf_program__set_flags(prog, flags); - attr.obj = obj; - attr.log_level = extra_prog_load_log_flags; - err = bpf_object__load_xattr(&attr); + err = bpf_object__load(obj); if (err) goto err_out; diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c index bfb97383e6b5..b4ec228eb95d 100644 --- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c +++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c @@ -35,7 +35,7 @@ .prog_type = BPF_PROG_TYPE_XDP, }, { - "XDP pkt read, pkt_data' > pkt_end, good access", + "XDP pkt read, pkt_data' > pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -88,6 +88,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end > pkt_data', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -106,16 +141,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end > pkt_data', bad access 1", + "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -143,6 +178,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end > pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end > pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' < pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -161,16 +232,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' < pkt_end, bad access 1", + "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -198,7 +269,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end < pkt_data', good access", + "XDP pkt read, pkt_data' < pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + 
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -251,6 +358,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end < pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' >= pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -268,15 +410,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' >= pkt_end, bad access 1", + "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -304,7 +446,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end >= pkt_data', good access", + "XDP pkt read, pkt_data' >= pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + 
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -359,7 +535,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' <= pkt_end, good access", + "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -414,6 +627,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end <= pkt_data', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -431,15 +681,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, 
pkt_end <= pkt_data', bad access 1", + "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -467,7 +717,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' > pkt_data, good access", + "XDP pkt read, pkt_end <= pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -520,6 +804,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data > pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -538,16 +857,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data > pkt_meta', bad access 1", + "XDP pkt read, pkt_data > pkt_meta', 
corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -575,6 +894,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data > pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' < pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -593,16 +948,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' < pkt_data, bad access 1", + "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -630,7 +985,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data < pkt_meta', good access", + "XDP pkt read, pkt_meta' < pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, 
BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -683,6 +1074,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' >= pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -700,15 +1126,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' >= pkt_data, bad access 1", + "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -736,7 +1162,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data >= pkt_meta', good access", + "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct 
xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -791,7 +1251,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' <= pkt_data, good access", + "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -846,6 +1343,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data <= pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -863,15 +1397,15 @@ .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data <= pkt_meta', bad access 1", + "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -898,3 +1432,37 @@ .prog_type = BPF_PROG_TYPE_XDP, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh index 027198768fad..5e43c79ddc6e 100755 --- a/tools/testing/selftests/bpf/vmtest.sh +++ b/tools/testing/selftests/bpf/vmtest.sh @@ -4,17 +4,34 @@ set -u set -e -# This script currently only works for x86_64, as -# it is based on the VM image used by the BPF CI which is -# x86_64. -QEMU_BINARY="${QEMU_BINARY:="qemu-system-x86_64"}" -X86_BZIMAGE="arch/x86/boot/bzImage" +# This script currently only works for x86_64 and s390x, as +# it is based on the VM image used by the BPF CI, which is +# available only for these architectures. 
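For the renamed XDP verifier cases above, the "corner case" is the tightest bound that still proves the 8-byte (BPF_DW) load safe: data_end points one byte past the packet, so an 8-byte read at data requires data + 8 <= data_end. A minimal C-level sketch of the same check follows; it is illustrative only (the program name is invented, and it assumes the usual <linux/bpf.h> and <bpf/bpf_helpers.h> headers), not code from this series.

/* Illustrative sketch: the C idiom that the raw-instruction tests
 * above encode. Checking only "+7" (corner case -1) leaves one byte
 * unproven, and the verifier rejects the load with
 * "R1 offset is outside of the packet".
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int bounds_demo(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 8 > data_end)	/* corner case: smallest safe bound */
		return XDP_DROP;

	return *(__u64 *)data ? XDP_PASS : XDP_DROP;	/* 8-byte load */
}

char _license[] SEC("license") = "GPL";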
+ARCH="$(uname -m)" +case "${ARCH}" in +s390x) + QEMU_BINARY=qemu-system-s390x + QEMU_CONSOLE="ttyS1" + QEMU_FLAGS=(-smp 2) + BZIMAGE="arch/s390/boot/compressed/vmlinux" + ;; +x86_64) + QEMU_BINARY=qemu-system-x86_64 + QEMU_CONSOLE="ttyS0,115200" + QEMU_FLAGS=(-cpu host -smp 8) + BZIMAGE="arch/x86/boot/bzImage" + ;; +*) + echo "Unsupported architecture" + exit 1 + ;; +esac DEFAULT_COMMAND="./test_progs" MOUNT_DIR="mnt" ROOTFS_IMAGE="root.img" OUTPUT_DIR="$HOME/.bpf_selftests" -KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/latest.config" -KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/latest.config" +KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/config-latest.${ARCH}" +KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/config-latest.${ARCH}" INDEX_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/INDEX" NUM_COMPILE_JOBS="$(nproc)" LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")" @@ -85,7 +102,7 @@ newest_rootfs_version() { { for file in "${!URLS[@]}"; do - if [[ $file =~ ^libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then + if [[ $file =~ ^"${ARCH}"/libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then echo "${BASH_REMATCH[1]}" fi done @@ -102,7 +119,7 @@ download_rootfs() exit 1 fi - download "libbpf-vmtest-rootfs-$rootfsversion.tar.zst" | + download "${ARCH}/libbpf-vmtest-rootfs-$rootfsversion.tar.zst" | zstd -d | sudo tar -C "$dir" -x } @@ -224,13 +241,12 @@ EOF -nodefaults \ -display none \ -serial mon:stdio \ - -cpu host \ + "${qemu_flags[@]}" \ -enable-kvm \ - -smp 8 \ -m 4G \ -drive file="${rootfs_img}",format=raw,index=1,media=disk,if=virtio,cache=none \ -kernel "${kernel_bzimage}" \ - -append "root=/dev/vda rw console=ttyS0,115200" + -append "root=/dev/vda rw console=${QEMU_CONSOLE}" } copy_logs() @@ -282,7 +298,7 @@ main() local kernel_checkout=$(realpath "${script_dir}"/../../../../) # By default the script searches for the kernel in the checkout directory but # it also obeys environment variables O= and KBUILD_OUTPUT= - local kernel_bzimage="${kernel_checkout}/${X86_BZIMAGE}" + local kernel_bzimage="${kernel_checkout}/${BZIMAGE}" local command="${DEFAULT_COMMAND}" local update_image="no" local exit_command="poweroff -f" @@ -337,13 +353,13 @@ main() if is_rel_path "${O}"; then O="$(realpath "${PWD}/${O}")" fi - kernel_bzimage="${O}/${X86_BZIMAGE}" + kernel_bzimage="${O}/${BZIMAGE}" make_command="${make_command} O=${O}" elif [[ "${KBUILD_OUTPUT:=""}" != "" ]]; then if is_rel_path "${KBUILD_OUTPUT}"; then KBUILD_OUTPUT="$(realpath "${PWD}/${KBUILD_OUTPUT}")" fi - kernel_bzimage="${KBUILD_OUTPUT}/${X86_BZIMAGE}" + kernel_bzimage="${KBUILD_OUTPUT}/${BZIMAGE}" make_command="${make_command} KBUILD_OUTPUT=${KBUILD_OUTPUT}" fi diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c index f5ffba341c17..51c8224b4ccc 100644 --- a/tools/testing/selftests/bpf/xdp_redirect_multi.c +++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c @@ -85,10 +85,7 @@ int main(int argc, char **argv) { int prog_fd, group_all, mac_map; struct bpf_program *ingress_prog, *egress_prog; - struct bpf_prog_load_attr prog_load_attr = { - .prog_type = BPF_PROG_TYPE_UNSPEC, - }; - int i, ret, opt, egress_prog_fd = 0; + int i, err, ret, opt, egress_prog_fd = 0; struct bpf_devmap_val devmap_val; bool attach_egress_prog = false; unsigned char 
mac_addr[6]; @@ -147,10 +144,14 @@ int main(int argc, char **argv) printf("\n"); snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - prog_load_attr.file = filename; - - if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + obj = bpf_object__open_file(filename, NULL); + err = libbpf_get_error(obj); + if (err) + goto err_out; + err = bpf_object__load(obj); + if (err) goto err_out; + prog_fd = bpf_program__fd(bpf_object__next_program(obj, NULL)); if (attach_egress_prog) group_all = bpf_object__find_map_fd_by_name(obj, "map_egress"); diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c index fe7f423b8c3f..0a5d23da486d 100644 --- a/tools/testing/selftests/bpf/xdpxceiver.c +++ b/tools/testing/selftests/bpf/xdpxceiver.c @@ -100,6 +100,12 @@ #include "xdpxceiver.h" #include "../kselftest.h" +/* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf. + * Until xdpxceiver is either moved or rewritten into libxdp, suppress + * deprecation warnings in this file + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; static const char *IP1 = "192.168.100.162"; @@ -1217,7 +1223,7 @@ static bool hugepages_present(struct ifobject *ifobject) void *bufs; bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_HUGETLB, -1, 0); + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); if (bufs == MAP_FAILED) return false; @@ -1364,6 +1370,10 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ testapp_invalid_desc(test); break; case TEST_TYPE_UNALIGNED_INV_DESC: + if (!hugepages_present(test->ifobj_tx)) { + ksft_test_result_skip("No 2M huge pages present.\n"); + return; + } test_spec_set_name(test, "UNALIGNED_INV_DESC"); test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh index 729a86cc4ede..3639b89c81ba 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh @@ -145,23 +145,6 @@ sanitization_single_dev_no_local_ip_test() log_test "vxlan device with no local ip" } -sanitization_single_dev_local_ipv6_test() -{ - RET=0 - - ip link add dev br0 type bridge mcast_snooping 0 - - ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \ - ttl 20 tos inherit local 2001:db8::1 dstport 4789 - - sanitization_single_dev_test_fail - - ip link del dev vxlan0 - ip link del dev br0 - - log_test "vxlan device with local ipv6 address" -} - sanitization_single_dev_learning_enabled_test() { RET=0 @@ -276,7 +259,6 @@ sanitization_single_dev_test() sanitization_single_dev_mcast_enabled_test sanitization_single_dev_mcast_group_test sanitization_single_dev_no_local_ip_test - sanitization_single_dev_local_ipv6_test sanitization_single_dev_learning_enabled_test sanitization_single_dev_local_interface_test sanitization_single_dev_port_range_test diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c index f968dfd4ee88..aed9dc3ca1e9 100644 --- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c +++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c @@ -12,6 +12,7 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> +#include <sys/resource.h> #include "test_util.h" @@ -40,11
+41,40 @@ int main(int argc, char *argv[]) { int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID); int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); + /* + * Number of file descriptors required, KVM_CAP_MAX_VCPUS for vCPU fds + + * an arbitrary number for everything else. + */ + int nr_fds_wanted = kvm_max_vcpus + 100; + struct rlimit rl; pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id); pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus); /* + * Check that we're allowed to open nr_fds_wanted file descriptors and + * try raising the limits if needed. + */ + TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!"); + + if (rl.rlim_cur < nr_fds_wanted) { + rl.rlim_cur = nr_fds_wanted; + if (rl.rlim_max < nr_fds_wanted) { + int old_rlim_max = rl.rlim_max; + rl.rlim_max = nr_fds_wanted; + + int r = setrlimit(RLIMIT_NOFILE, &rl); + if (r < 0) { + printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n", + old_rlim_max, nr_fds_wanted); + exit(KSFT_SKIP); + } + } else { + TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!"); + } + } + + /* * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID. * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID * in this case. diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c index 3836322add00..ba1fdc3dcf4a 100644 --- a/tools/testing/selftests/kvm/kvm_page_table_test.c +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) #ifdef __s390x__ alignment = max(0x100000, alignment); #endif - guest_test_phys_mem = align_down(guest_test_virt_mem, alignment); + guest_test_phys_mem = align_down(guest_test_phys_mem, alignment); /* Set up the shared data structure test_args */ test_args.vm = vm; diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 91d88aaa9899..672915ce73d8 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid, vcpu_set_cpuid(vm, VCPU_ID, cpuid); } -static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, - struct kvm_cpuid2 *best) +static void guest_test_msrs_access(void) { struct kvm_run *run; + struct kvm_vm *vm; struct ucall uc; int stage = 0, r; struct kvm_cpuid_entry2 feat = { @@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, struct kvm_cpuid_entry2 dbg = { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }; - struct kvm_enable_cap cap = {0}; - - run = vcpu_state(vm, VCPU_ID); + struct kvm_cpuid2 *best; + vm_vaddr_t msr_gva; + struct kvm_enable_cap cap = { + .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, + .args = {1} + }; + struct msr_data *msr; while (true) { + vm = vm_create_default(VCPU_ID, 0, guest_msr); + + msr_gva = vm_vaddr_alloc_page(vm); + memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); + msr = addr_gva2hva(vm, msr_gva); + + vcpu_args_set(vm, VCPU_ID, 1, msr_gva); + vcpu_enable_cap(vm, VCPU_ID, &cap); + + vcpu_set_hv_cpuid(vm, VCPU_ID); + + best = kvm_get_supported_hv_cpuid(); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); + + run = vcpu_state(vm, VCPU_ID); + switch (stage) { case 0: /* @@ -315,6 +338,7 @@ static void
guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, * capability enabled and guest visible CPUID bit unset. */ cap.cap = KVM_CAP_HYPERV_SYNIC2; + cap.args[0] = 0; vcpu_enable_cap(vm, VCPU_ID, &cap); break; case 22: @@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, switch (get_ucall(vm, VCPU_ID, &uc)) { case UCALL_SYNC: - TEST_ASSERT(uc.args[1] == stage, - "Unexpected stage: %ld (%d expected)\n", - uc.args[1], stage); + TEST_ASSERT(uc.args[1] == 0, + "Unexpected stage: %ld (0 expected)\n", + uc.args[1]); break; case UCALL_ABORT: TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], @@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, } stage++; + kvm_vm_free(vm); } } -static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall, - void *input, void *output, struct kvm_cpuid2 *best) +static void guest_test_hcalls_access(void) { struct kvm_run *run; + struct kvm_vm *vm; struct ucall uc; int stage = 0, r; struct kvm_cpuid_entry2 feat = { @@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall struct kvm_cpuid_entry2 dbg = { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }; - - run = vcpu_state(vm, VCPU_ID); + struct kvm_enable_cap cap = { + .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, + .args = {1} + }; + vm_vaddr_t hcall_page, hcall_params; + struct hcall_data *hcall; + struct kvm_cpuid2 *best; while (true) { + vm = vm_create_default(VCPU_ID, 0, guest_hcall); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); + + /* Hypercall input/output */ + hcall_page = vm_vaddr_alloc_pages(vm, 2); + hcall = addr_gva2hva(vm, hcall_page); + memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); + + hcall_params = vm_vaddr_alloc_page(vm); + memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); + + vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params); + vcpu_enable_cap(vm, VCPU_ID, &cap); + + vcpu_set_hv_cpuid(vm, VCPU_ID); + + best = kvm_get_supported_hv_cpuid(); + + run = vcpu_state(vm, VCPU_ID); + switch (stage) { case 0: hcall->control = 0xdeadbeef; @@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall switch (get_ucall(vm, VCPU_ID, &uc)) { case UCALL_SYNC: - TEST_ASSERT(uc.args[1] == stage, - "Unexpected stage: %ld (%d expected)\n", - uc.args[1], stage); + TEST_ASSERT(uc.args[1] == 0, + "Unexpected stage: %ld (0 expected)\n", + uc.args[1]); break; case UCALL_ABORT: TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], @@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall } stage++; + kvm_vm_free(vm); } } int main(void) { - struct kvm_cpuid2 *best; - struct kvm_vm *vm; - vm_vaddr_t msr_gva, hcall_page, hcall_params; - struct kvm_enable_cap cap = { - .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, - .args = {1} - }; - - /* Test MSRs */ - vm = vm_create_default(VCPU_ID, 0, guest_msr); - - msr_gva = vm_vaddr_alloc_page(vm); - memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); - vcpu_args_set(vm, VCPU_ID, 1, msr_gva); - vcpu_enable_cap(vm, VCPU_ID, &cap); - - vcpu_set_hv_cpuid(vm, VCPU_ID); - - best = kvm_get_supported_hv_cpuid(); - - vm_init_descriptor_tables(vm); - vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); - pr_info("Testing access to Hyper-V specific MSRs\n"); - 
guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva), - best); - kvm_vm_free(vm); - - /* Test hypercalls */ - vm = vm_create_default(VCPU_ID, 0, guest_hcall); - - vm_init_descriptor_tables(vm); - vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); - - /* Hypercall input/output */ - hcall_page = vm_vaddr_alloc_pages(vm, 2); - memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); - - hcall_params = vm_vaddr_alloc_page(vm); - memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); - - vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params); - vcpu_enable_cap(vm, VCPU_ID, &cap); - - vcpu_set_hv_cpuid(vm, VCPU_ID); - - best = kvm_get_supported_hv_cpuid(); + guest_test_msrs_access(); pr_info("Testing access to Hyper-V hypercalls\n"); - guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params), - addr_gva2hva(vm, hcall_page), - addr_gva2hva(vm, hcall_page) + getpagesize(), - best); - - kvm_vm_free(vm); + guest_test_hcalls_access(); } diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c index 5ba325cd64bf..29b18d565cf4 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c +++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c @@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es) return vm; } -static struct kvm_vm *__vm_create(void) +static struct kvm_vm *aux_vm_create(bool with_vcpus) { struct kvm_vm *vm; int i; vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); + if (!with_vcpus) + return vm; + for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) vm_vcpu_add(vm, i); @@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es) { struct kvm_vm *src_vm; struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS]; - int i; + int i, ret; src_vm = sev_vm_create(es); for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i) - dst_vms[i] = __vm_create(); + dst_vms[i] = aux_vm_create(true); /* Initial migration from the src to the first dst. */ sev_migrate_from(dst_vms[0]->fd, src_vm->fd); @@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es) sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd); /* Migrate the guest back to the original VM. */ - sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd); + ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd); + TEST_ASSERT(ret == -1 && errno == EIO, + "VM that was migrated from should be dead. ret %d, errno: %d\n", ret, + errno); kvm_vm_free(src_vm); for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i) @@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void) for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) pthread_join(pt[i], NULL); + for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) + kvm_vm_free(input[i].vm); } static void test_sev_migrate_parameters(void) @@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void) sev_vm = sev_vm_create(/* es= */ false); sev_es_vm = sev_vm_create(/* es= */ true); vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); - vm_no_sev = __vm_create(); + vm_no_sev = aux_vm_create(true); sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL); vm_vcpu_add(sev_es_vm_no_vmsa, 1); - ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd); TEST_ASSERT( ret == -1 && errno == EINVAL, @@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void) TEST_ASSERT(ret == -1 && errno == EINVAL, "Migrations require SEV enabled. 
ret %d, errno: %d\n", ret, errno); + + kvm_vm_free(sev_vm); + kvm_vm_free(sev_es_vm); + kvm_vm_free(sev_es_vm_no_vmsa); + kvm_vm_free(vm_no_vcpu); + kvm_vm_free(vm_no_sev); +} + +static int __sev_mirror_create(int dst_fd, int src_fd) +{ + struct kvm_enable_cap cap = { + .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, + .args = { src_fd } + }; + + return ioctl(dst_fd, KVM_ENABLE_CAP, &cap); +} + + +static void sev_mirror_create(int dst_fd, int src_fd) +{ + int ret; + + ret = __sev_mirror_create(dst_fd, src_fd); + TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno); +} + +static void test_sev_mirror(bool es) +{ + struct kvm_vm *src_vm, *dst_vm; + struct kvm_sev_launch_start start = { + .policy = es ? SEV_POLICY_ES : 0 + }; + int i; + + src_vm = sev_vm_create(es); + dst_vm = aux_vm_create(false); + + sev_mirror_create(dst_vm->fd, src_vm->fd); + + /* Check that we can complete creation of the mirror VM. */ + for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) + vm_vcpu_add(dst_vm, i); + sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start); + if (es) + sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); + + kvm_vm_free(src_vm); + kvm_vm_free(dst_vm); +} + +static void test_sev_mirror_parameters(void) +{ + struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu; + int ret; + + sev_vm = sev_vm_create(/* es= */ false); + sev_es_vm = sev_vm_create(/* es= */ true); + vm_with_vcpu = aux_vm_create(true); + vm_no_vcpu = aux_vm_create(false); + + ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able to copy context to self. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able to copy context to SEV enabled VM. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able to copy context to SEV-ES enabled VM. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd); + TEST_ASSERT(ret == -1 && errno == EINVAL, + "Copy context requires SEV enabled. ret %d, errno: %d\n", ret, + errno); + + ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "SEV copy context requires no vCPUs on the destination. ret: %d, errno: %d\n", + ret, errno); + + kvm_vm_free(sev_vm); + kvm_vm_free(sev_es_vm); + kvm_vm_free(vm_with_vcpu); + kvm_vm_free(vm_no_vcpu); +} + +static void test_sev_move_copy(void) +{ + struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm; + int ret; + + sev_vm = sev_vm_create(/* es= */ false); + dst_vm = aux_vm_create(true); + mirror_vm = aux_vm_create(false); + dst_mirror_vm = aux_vm_create(false); + + sev_mirror_create(mirror_vm->fd, sev_vm->fd); + ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd); + TEST_ASSERT(ret == -1 && errno == EBUSY, + "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret, + errno); + + /* The mirror itself can be migrated. */ + sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd); + ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd); + TEST_ASSERT(ret == -1 && errno == EBUSY, + "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret, + errno); + + /* + * mirror_vm is not a mirror anymore, dst_mirror_vm is. Thus, + * the owner can be copied as soon as dst_mirror_vm is gone. 
+ */ + kvm_vm_free(dst_mirror_vm); + sev_migrate_from(dst_vm->fd, sev_vm->fd); + + kvm_vm_free(mirror_vm); + kvm_vm_free(dst_vm); + kvm_vm_free(sev_vm); } int main(int argc, char *argv[]) { - test_sev_migrate_from(/* es= */ false); - test_sev_migrate_from(/* es= */ true); - test_sev_migrate_locking(); - test_sev_migrate_parameters(); + if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) { + test_sev_migrate_from(/* es= */ false); + test_sev_migrate_from(/* es= */ true); + test_sev_migrate_locking(); + test_sev_migrate_parameters(); + if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) + test_sev_move_copy(); + } + if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) { + test_sev_mirror(/* es= */ false); + test_sev_mirror(/* es= */ true); + test_sev_mirror_parameters(); + } return 0; } diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 7caa4f0e067d..98e4d5a2f587 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -4042,8 +4042,8 @@ EOF ################################################################################ # main -TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter" -TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter" +TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter" +TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter" TESTS_OTHER="use_cases" PAUSE_ON_FAIL=no @@ -4108,8 +4108,6 @@ do # setup namespaces and config, but do not run any tests setup) setup; exit 0;; vrf_setup) setup "yes"; exit 0;; - - help) echo "Test names: $TESTS"; exit 0;; esac done @@ -4117,3 +4115,11 @@ cleanup 2>/dev/null printf "\nTests passed: %3d\n" ${nsuccess} printf "Tests failed: %3d\n" ${nfail} + +if [ $nfail -ne 0 ]; then + exit 1 # KSFT_FAIL +elif [ $nsuccess -eq 0 ]; then + exit $ksft_skip +fi + +exit 0 # KSFT_PASS diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 5abe92d55b69..996af1ae3d3d 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -444,24 +444,63 @@ fib_rp_filter_test() setup set -e + ip netns add ns2 + ip netns set ns2 auto + + ip -netns ns2 link set dev lo up + + $IP link add name veth1 type veth peer name veth2 + $IP link set dev veth2 netns ns2 + $IP address add 192.0.2.1/24 dev veth1 + ip -netns ns2 address add 192.0.2.1/24 dev veth2 + $IP link set dev veth1 up + ip -netns ns2 link set dev veth2 up + $IP link set dev lo address 52:54:00:6a:c7:5e - $IP link set dummy0 address 52:54:00:6a:c7:5e - $IP link add dummy1 type dummy - $IP link set dummy1 address 52:54:00:6a:c7:5e - $IP link set dev dummy1 up + $IP link set dev veth1 address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e + + # 1. (ns2) redirect lo's egress to veth2's egress + ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel + ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth2 + ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth2 + + # 2. 
(ns1) redirect veth1's ingress to lo's ingress + $NS_EXEC tc qdisc add dev veth1 ingress + $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \ + action mirred ingress redirect dev lo + $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \ + action mirred ingress redirect dev lo + + # 3. (ns1) redirect lo's egress to veth1's egress + $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel + $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth1 + $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth1 + + # 4. (ns2) redirect veth2's ingress to lo's ingress + ip netns exec ns2 tc qdisc add dev veth2 ingress + ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \ + action mirred ingress redirect dev lo + ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \ + action mirred ingress redirect dev lo + $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1 - - $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel - $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo - $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1 set +e - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1" log_test $? 0 "rp_filter passes local packets" - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1" log_test $? 
0 "rp_filter passes loopback packets" cleanup diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c index cf37ce86b0fd..ee0b03f3dd06 100644 --- a/tools/testing/selftests/net/gro.c +++ b/tools/testing/selftests/net/gro.c @@ -64,10 +64,6 @@ #define NUM_PACKETS 4 #define START_SEQ 100 #define START_ACK 100 -#define SIP6 "fdaa::2" -#define DIP6 "fdaa::1" -#define SIP4 "192.168.1.200" -#define DIP4 "192.168.1.100" #define ETH_P_NONE 0 #define TOTAL_HDR_LEN (ETH_HLEN + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) #define MSS (4096 - sizeof(struct tcphdr) - sizeof(struct ipv6hdr)) @@ -75,6 +71,10 @@ #define NUM_LARGE_PKT (MAX_PAYLOAD / MSS) #define MAX_HDR_LEN (ETH_HLEN + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) +static const char *addr6_src = "fdaa::2"; +static const char *addr6_dst = "fdaa::1"; +static const char *addr4_src = "192.168.1.200"; +static const char *addr4_dst = "192.168.1.100"; static int proto = -1; static uint8_t src_mac[ETH_ALEN], dst_mac[ETH_ALEN]; static char *testname = "data"; @@ -178,18 +178,18 @@ static uint16_t tcp_checksum(void *buf, int payload_len) uint32_t sum = 0; if (proto == PF_INET6) { - if (inet_pton(AF_INET6, SIP6, &ph6.saddr) != 1) + if (inet_pton(AF_INET6, addr6_src, &ph6.saddr) != 1) error(1, errno, "inet_pton6 source ip pseudo"); - if (inet_pton(AF_INET6, DIP6, &ph6.daddr) != 1) + if (inet_pton(AF_INET6, addr6_dst, &ph6.daddr) != 1) error(1, errno, "inet_pton6 dest ip pseudo"); ph6.protocol = htons(IPPROTO_TCP); ph6.payload_len = htons(sizeof(struct tcphdr) + payload_len); sum = checksum_nofold(&ph6, sizeof(ph6), 0); } else if (proto == PF_INET) { - if (inet_pton(AF_INET, SIP4, &ph4.saddr) != 1) + if (inet_pton(AF_INET, addr4_src, &ph4.saddr) != 1) error(1, errno, "inet_pton source ip pseudo"); - if (inet_pton(AF_INET, DIP4, &ph4.daddr) != 1) + if (inet_pton(AF_INET, addr4_dst, &ph4.daddr) != 1) error(1, errno, "inet_pton dest ip pseudo"); ph4.protocol = htons(IPPROTO_TCP); ph4.payload_len = htons(sizeof(struct tcphdr) + payload_len); @@ -229,9 +229,9 @@ static void fill_networklayer(void *buf, int payload_len) ip6h->payload_len = htons(sizeof(struct tcphdr) + payload_len); ip6h->nexthdr = IPPROTO_TCP; ip6h->hop_limit = 8; - if (inet_pton(AF_INET6, SIP6, &ip6h->saddr) != 1) + if (inet_pton(AF_INET6, addr6_src, &ip6h->saddr) != 1) error(1, errno, "inet_pton source ip6"); - if (inet_pton(AF_INET6, DIP6, &ip6h->daddr) != 1) + if (inet_pton(AF_INET6, addr6_dst, &ip6h->daddr) != 1) error(1, errno, "inet_pton dest ip6"); } else if (proto == PF_INET) { memset(iph, 0, sizeof(*iph)); @@ -243,9 +243,9 @@ static void fill_networklayer(void *buf, int payload_len) iph->tot_len = htons(sizeof(struct tcphdr) + payload_len + sizeof(struct iphdr)); iph->frag_off = htons(0x4000); /* DF = 1, MF = 0 */ - if (inet_pton(AF_INET, SIP4, &iph->saddr) != 1) + if (inet_pton(AF_INET, addr4_src, &iph->saddr) != 1) error(1, errno, "inet_pton source ip"); - if (inet_pton(AF_INET, DIP4, &iph->daddr) != 1) + if (inet_pton(AF_INET, addr4_dst, &iph->daddr) != 1) error(1, errno, "inet_pton dest ip"); iph->check = checksum_fold(buf, sizeof(struct iphdr), 0); } @@ -731,7 +731,7 @@ static void set_timeout(int fd) { struct timeval timeout; - timeout.tv_sec = 120; + timeout.tv_sec = 3; timeout.tv_usec = 0; if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout, sizeof(timeout)) < 0) @@ -1023,11 +1023,13 @@ static void gro_receiver(void) static void parse_args(int argc, char **argv) { static const struct option opts[] = { + { "daddr", required_argument, 
NULL, 'd' }, { "dmac", required_argument, NULL, 'D' }, { "iface", required_argument, NULL, 'i' }, { "ipv4", no_argument, NULL, '4' }, { "ipv6", no_argument, NULL, '6' }, { "rx", no_argument, NULL, 'r' }, + { "saddr", required_argument, NULL, 's' }, { "smac", required_argument, NULL, 'S' }, { "test", required_argument, NULL, 't' }, { "verbose", no_argument, NULL, 'v' }, @@ -1035,7 +1037,7 @@ static void parse_args(int argc, char **argv) }; int c; - while ((c = getopt_long(argc, argv, "46D:i:rS:t:v", opts, NULL)) != -1) { + while ((c = getopt_long(argc, argv, "46d:D:i:rs:S:t:v", opts, NULL)) != -1) { switch (c) { case '4': proto = PF_INET; @@ -1045,6 +1047,9 @@ static void parse_args(int argc, char **argv) proto = PF_INET6; ethhdr_proto = htons(ETH_P_IPV6); break; + case 'd': + addr4_dst = addr6_dst = optarg; + break; case 'D': dmac = optarg; break; @@ -1054,6 +1059,9 @@ static void parse_args(int argc, char **argv) case 'r': tx_socket = false; break; + case 's': + addr4_src = addr6_src = optarg; + break; case 'S': smac = optarg; break; @@ -1091,5 +1099,7 @@ int main(int argc, char **argv) gro_sender(); else gro_receiver(); + + fprintf(stderr, "Gro::%s test passed.\n", testname); return 0; } diff --git a/tools/testing/selftests/net/mptcp/.gitignore b/tools/testing/selftests/net/mptcp/.gitignore index 7569d892967a..49daae73c41e 100644 --- a/tools/testing/selftests/net/mptcp/.gitignore +++ b/tools/testing/selftests/net/mptcp/.gitignore @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only mptcp_connect +mptcp_inq mptcp_sockopt pm_nl_ctl *.pcap diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile index bbf4e448bad9..0356c4501c99 100644 --- a/tools/testing/selftests/net/mptcp/Makefile +++ b/tools/testing/selftests/net/mptcp/Makefile @@ -8,7 +8,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \ simult_flows.sh mptcp_sockopt.sh -TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt +TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq TEST_FILES := settings diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index ada9b80774d4..98de28ac3ba8 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -73,12 +73,20 @@ static uint32_t cfg_mark; struct cfg_cmsg_types { unsigned int cmsg_enabled:1; unsigned int timestampns:1; + unsigned int tcp_inq:1; }; struct cfg_sockopt_types { unsigned int transparent:1; }; +struct tcp_inq_state { + unsigned int last; + bool expect_eof; +}; + +static struct tcp_inq_state tcp_inq; + static struct cfg_cmsg_types cfg_cmsg_types; static struct cfg_sockopt_types cfg_sockopt_types; @@ -389,7 +397,9 @@ static size_t do_write(const int fd, char *buf, const size_t len) static void process_cmsg(struct msghdr *msgh) { struct __kernel_timespec ts; + bool inq_found = false; bool ts_found = false; + unsigned int inq = 0; struct cmsghdr *cmsg; for (cmsg = CMSG_FIRSTHDR(msgh); cmsg ; cmsg = CMSG_NXTHDR(msgh, cmsg)) { @@ -398,12 +408,27 @@ static void process_cmsg(struct msghdr *msgh) ts_found = true; continue; } + if (cmsg->cmsg_level == IPPROTO_TCP && cmsg->cmsg_type == TCP_CM_INQ) { + memcpy(&inq, CMSG_DATA(cmsg), sizeof(inq)); + inq_found = true; + continue; + } + } if (cfg_cmsg_types.timestampns) { if (!ts_found) xerror("TIMESTAMPNS not present\n"); } + + if (cfg_cmsg_types.tcp_inq) { + if (!inq_found) 
+ xerror("TCP_INQ not present\n"); + + if (inq > 1024) + xerror("tcp_inq %u is larger than one kbyte\n", inq); + tcp_inq.last = inq; + } } static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len) @@ -420,10 +445,23 @@ static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len) .msg_controllen = sizeof(msg_buf), }; int flags = 0; + unsigned int last_hint = tcp_inq.last; int ret = recvmsg(fd, &msg, flags); - if (ret <= 0) + if (ret <= 0) { + if (ret == 0 && tcp_inq.expect_eof) + return ret; + + if (ret == 0 && cfg_cmsg_types.tcp_inq) + if (last_hint != 1 && last_hint != 0) + xerror("EOF but last tcp_inq hint was %u\n", last_hint); + return ret; + } + + if (tcp_inq.expect_eof) + xerror("expected EOF, last_hint %u, now %u\n", + last_hint, tcp_inq.last); if (msg.msg_controllen && !cfg_cmsg_types.cmsg_enabled) xerror("got %lu bytes of cmsg data, expected 0\n", @@ -435,6 +473,19 @@ static ssize_t do_recvmsg_cmsg(const int fd, char *buf, const size_t len) if (msg.msg_controllen) process_cmsg(&msg); + if (cfg_cmsg_types.tcp_inq) { + if ((size_t)ret < len && last_hint > (unsigned int)ret) { + if (ret + 1 != (int)last_hint) { + int next = read(fd, msg_buf, sizeof(msg_buf)); + + xerror("read %u of %u, last_hint was %u tcp_inq hint now %u next_read returned %d/%m\n", + ret, (unsigned int)len, last_hint, tcp_inq.last, next); + } else { + tcp_inq.expect_eof = true; + } + } + } + return ret; } @@ -944,6 +995,8 @@ static void apply_cmsg_types(int fd, const struct cfg_cmsg_types *cmsg) if (cmsg->timestampns) xsetsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, &on, sizeof(on)); + if (cmsg->tcp_inq) + xsetsockopt(fd, IPPROTO_TCP, TCP_INQ, &on, sizeof(on)); } static void parse_cmsg_types(const char *type) @@ -965,6 +1018,11 @@ static void parse_cmsg_types(const char *type) return; } + if (strncmp(type, "TCPINQ", len) == 0) { + cfg_cmsg_types.tcp_inq = 1; + return; + } + fprintf(stderr, "Unrecognized cmsg option %s\n", type); exit(1); } diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c new file mode 100644 index 000000000000..29f75e2a1116 --- /dev/null +++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE + +#include <assert.h> +#include <errno.h> +#include <fcntl.h> +#include <limits.h> +#include <string.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdint.h> +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> +#include <strings.h> +#include <unistd.h> +#include <time.h> + +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/types.h> +#include <sys/wait.h> + +#include <netdb.h> +#include <netinet/in.h> + +#include <linux/tcp.h> +#include <linux/sockios.h> + +#ifndef IPPROTO_MPTCP +#define IPPROTO_MPTCP 262 +#endif +#ifndef SOL_MPTCP +#define SOL_MPTCP 284 +#endif + +static int pf = AF_INET; +static int proto_tx = IPPROTO_MPTCP; +static int proto_rx = IPPROTO_MPTCP; + +static void die_perror(const char *msg) +{ + perror(msg); + exit(1); +} + +static void die_usage(int r) +{ + fprintf(stderr, "Usage: mptcp_inq [-6] [ -t tcp|mptcp ] [ -r tcp|mptcp]\n"); + exit(r); +} + +static void xerror(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + fputc('\n', stderr); + exit(1); +} + +static const char *getxinfo_strerr(int err) +{ + if (err == EAI_SYSTEM) + return strerror(errno); + + return gai_strerror(err); +} + +static void xgetaddrinfo(const char *node, const char *service, + const struct addrinfo *hints, + struct addrinfo **res) +{ + int err = getaddrinfo(node, service, hints, res); + + if (err) { + const char *errstr = getxinfo_strerr(err); + + fprintf(stderr, "Fatal: getaddrinfo(%s:%s): %s\n", + node ? node : "", service ? service : "", errstr); + exit(1); + } +} + +static int sock_listen_mptcp(const char * const listenaddr, + const char * const port) +{ + int sock; + struct addrinfo hints = { + .ai_protocol = IPPROTO_TCP, + .ai_socktype = SOCK_STREAM, + .ai_flags = AI_PASSIVE | AI_NUMERICHOST + }; + + hints.ai_family = pf; + + struct addrinfo *a, *addr; + int one = 1; + + xgetaddrinfo(listenaddr, port, &hints, &addr); + hints.ai_family = pf; + + for (a = addr; a; a = a->ai_next) { + sock = socket(a->ai_family, a->ai_socktype, proto_rx); + if (sock < 0) + continue; + + if (-1 == setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))) + perror("setsockopt"); + + if (bind(sock, a->ai_addr, a->ai_addrlen) == 0) + break; /* success */ + + perror("bind"); + close(sock); + sock = -1; + } + + freeaddrinfo(addr); + + if (sock < 0) + xerror("could not create listen socket"); + + if (listen(sock, 20)) + die_perror("listen"); + + return sock; +} + +static int sock_connect_mptcp(const char * const remoteaddr, + const char * const port, int proto) +{ + struct addrinfo hints = { + .ai_protocol = IPPROTO_TCP, + .ai_socktype = SOCK_STREAM, + }; + struct addrinfo *a, *addr; + int sock = -1; + + hints.ai_family = pf; + + xgetaddrinfo(remoteaddr, port, &hints, &addr); + for (a = addr; a; a = a->ai_next) { + sock = socket(a->ai_family, a->ai_socktype, proto); + if (sock < 0) + continue; + + if (connect(sock, a->ai_addr, a->ai_addrlen) == 0) + break; /* success */ + + die_perror("connect"); + } + + if (sock < 0) + xerror("could not create connect socket"); + + freeaddrinfo(addr); + return sock; +} + +static int protostr_to_num(const char *s) +{ + if (strcasecmp(s, "tcp") == 0) + return IPPROTO_TCP; + if (strcasecmp(s, "mptcp") == 0) + return IPPROTO_MPTCP; + + die_usage(1); + return 0; +} + +static void parse_opts(int argc, char **argv) +{ + int c; + + while ((c = getopt(argc, argv, "h6t:r:")) != -1) { + switch (c) { + case 'h': + die_usage(0); + break; + case '6': + pf = AF_INET6; + break; + case 't': + proto_tx = protostr_to_num(optarg); + break; + case 'r': + proto_rx = protostr_to_num(optarg); + break; + default: + die_usage(1); + break; + } + } +} + +/* wait up to timeout milliseconds */ +static void wait_for_ack(int fd, int timeout, size_t total) +{ + int i; + + for (i = 0; i < timeout; i++) { + int nsd, ret, queued = -1; + struct timespec req; + + ret = ioctl(fd, TIOCOUTQ, &queued); + if (ret < 0) + die_perror("TIOCOUTQ"); + + ret = ioctl(fd, SIOCOUTQNSD, &nsd); + if (ret < 0) + die_perror("SIOCOUTQNSD"); + + if ((size_t)queued > total) + xerror("TIOCOUTQ %u, but only %zu expected\n", queued, total); + assert(nsd <= queued); + + if (queued == 0) + return; + + /* wait for peer to ack rx of all data */ + req.tv_sec = 0; + req.tv_nsec = 1 * 1000 * 1000ul; /* 1ms */ + nanosleep(&req, NULL); + } + + xerror("still tx data queued after %u ms\n", timeout); +} + +static void connect_one_server(int fd, int unixfd) +{ + size_t len, i, total, sent; + char 
buf[4096], buf2[4096]; + ssize_t ret; + + len = rand() % (sizeof(buf) - 1); + + if (len < 128) + len = 128; + + for (i = 0; i < len ; i++) { + buf[i] = rand() % 26; + buf[i] += 'A'; + } + + buf[i] = '\n'; + + /* un-block server */ + ret = read(unixfd, buf2, 4); + assert(ret == 4); + + assert(strncmp(buf2, "xmit", 4) == 0); + + ret = write(unixfd, &len, sizeof(len)); + assert(ret == (ssize_t)sizeof(len)); + + ret = write(fd, buf, len); + if (ret < 0) + die_perror("write"); + + if (ret != (ssize_t)len) + xerror("short write"); + + ret = read(unixfd, buf2, 4); + assert(strncmp(buf2, "huge", 4) == 0); + + total = rand() % (16 * 1024 * 1024); + total += (1 * 1024 * 1024); + sent = total; + + ret = write(unixfd, &total, sizeof(total)); + assert(ret == (ssize_t)sizeof(total)); + + wait_for_ack(fd, 5000, len); + + while (total > 0) { + if (total > sizeof(buf)) + len = sizeof(buf); + else + len = total; + + ret = write(fd, buf, len); + if (ret < 0) + die_perror("write"); + total -= ret; + + /* we don't have to care about buf content, only + * number of total bytes sent + */ + } + + ret = read(unixfd, buf2, 4); + assert(ret == 4); + assert(strncmp(buf2, "shut", 4) == 0); + + wait_for_ack(fd, 5000, sent); + + ret = write(fd, buf, 1); + assert(ret == 1); + close(fd); + ret = write(unixfd, "closed", 6); + assert(ret == 6); + + close(unixfd); +} + +static void get_tcp_inq(struct msghdr *msgh, unsigned int *inqv) +{ + struct cmsghdr *cmsg; + + for (cmsg = CMSG_FIRSTHDR(msgh); cmsg ; cmsg = CMSG_NXTHDR(msgh, cmsg)) { + if (cmsg->cmsg_level == IPPROTO_TCP && cmsg->cmsg_type == TCP_CM_INQ) { + memcpy(inqv, CMSG_DATA(cmsg), sizeof(*inqv)); + return; + } + } + + xerror("could not find TCP_CM_INQ cmsg type"); +} + +static void process_one_client(int fd, int unixfd) +{ + unsigned int tcp_inq; + size_t expect_len; + char msg_buf[4096]; + char buf[4096]; + char tmp[16]; + struct iovec iov = { + .iov_base = buf, + .iov_len = 1, + }; + struct msghdr msg = { + .msg_iov = &iov, + .msg_iovlen = 1, + .msg_control = msg_buf, + .msg_controllen = sizeof(msg_buf), + }; + ssize_t ret, tot; + + ret = write(unixfd, "xmit", 4); + assert(ret == 4); + + ret = read(unixfd, &expect_len, sizeof(expect_len)); + assert(ret == (ssize_t)sizeof(expect_len)); + + if (expect_len > sizeof(buf)) + xerror("expect len %zu exceeds buffer size", expect_len); + + for (;;) { + struct timespec req; + unsigned int queued; + + ret = ioctl(fd, FIONREAD, &queued); + if (ret < 0) + die_perror("FIONREAD"); + if (queued > expect_len) + xerror("FIONREAD returned %u, but only %zu expected\n", + queued, expect_len); + if (queued == expect_len) + break; + + req.tv_sec = 0; + req.tv_nsec = 1000 * 1000ul; + nanosleep(&req, NULL); + } + + /* read one byte, expect cmsg to return expected - 1 */ + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + if (msg.msg_controllen == 0) + xerror("msg_controllen is 0"); + + get_tcp_inq(&msg, &tcp_inq); + + assert((size_t)tcp_inq == (expect_len - 1)); + + iov.iov_len = sizeof(buf); + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + /* should have gotten exact remainder of all pending data */ + assert(ret == (ssize_t)tcp_inq); + + /* should be 0, all drained */ + get_tcp_inq(&msg, &tcp_inq); + assert(tcp_inq == 0); + + /* request a large swath of data. 
*/ + ret = write(unixfd, "huge", 4); + assert(ret == 4); + + ret = read(unixfd, &expect_len, sizeof(expect_len)); + assert(ret == (ssize_t)sizeof(expect_len)); + + /* peer should send us a few mb of data */ + if (expect_len <= sizeof(buf)) + xerror("expect len %zu too small\n", expect_len); + + tot = 0; + do { + iov.iov_len = sizeof(buf); + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + tot += ret; + + get_tcp_inq(&msg, &tcp_inq); + + if (tcp_inq > expect_len - tot) + xerror("inq %d, remaining %d total_len %d\n", + tcp_inq, expect_len - tot, (int)expect_len); + + assert(tcp_inq <= expect_len - tot); + } while ((size_t)tot < expect_len); + + ret = write(unixfd, "shut", 4); + assert(ret == 4); + + /* wait for hangup. Should have received one more byte of data. */ + ret = read(unixfd, tmp, sizeof(tmp)); + assert(ret == 6); + assert(strncmp(tmp, "closed", 6) == 0); + + sleep(1); + + iov.iov_len = 1; + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + assert(ret == 1); + + get_tcp_inq(&msg, &tcp_inq); + + /* tcp_inq should be 1 due to received fin. */ + assert(tcp_inq == 1); + + iov.iov_len = 1; + ret = recvmsg(fd, &msg, 0); + if (ret < 0) + die_perror("recvmsg"); + + /* expect EOF */ + assert(ret == 0); + get_tcp_inq(&msg, &tcp_inq); + assert(tcp_inq == 1); + + close(fd); +} + +static int xaccept(int s) +{ + int fd = accept(s, NULL, 0); + + if (fd < 0) + die_perror("accept"); + + return fd; +} + +static int server(int unixfd) +{ + int fd = -1, r, on = 1; + + switch (pf) { + case AF_INET: + fd = sock_listen_mptcp("127.0.0.1", "15432"); + break; + case AF_INET6: + fd = sock_listen_mptcp("::1", "15432"); + break; + default: + xerror("Unknown pf %d\n", pf); + break; + } + + r = write(unixfd, "conn", 4); + assert(r == 4); + + alarm(15); + r = xaccept(fd); + + if (-1 == setsockopt(r, IPPROTO_TCP, TCP_INQ, &on, sizeof(on))) + die_perror("setsockopt"); + + process_one_client(r, unixfd); + + return 0; +} + +static int client(int unixfd) +{ + int fd = -1; + + alarm(15); + + switch (pf) { + case AF_INET: + fd = sock_connect_mptcp("127.0.0.1", "15432", proto_tx); + break; + case AF_INET6: + fd = sock_connect_mptcp("::1", "15432", proto_tx); + break; + default: + xerror("Unknown pf %d\n", pf); + } + + connect_one_server(fd, unixfd); + + return 0; +} + +static void init_rng(void) +{ + int fd = open("/dev/urandom", O_RDONLY); + unsigned int foo; + + if (fd > 0) { + int ret = read(fd, &foo, sizeof(foo)); + + if (ret < 0) + srand(fd + foo); + close(fd); + } + + srand(foo); +} + +static pid_t xfork(void) +{ + pid_t p = fork(); + + if (p < 0) + die_perror("fork"); + else if (p == 0) + init_rng(); + + return p; +} + +static int rcheck(int wstatus, const char *what) +{ + if (WIFEXITED(wstatus)) { + if (WEXITSTATUS(wstatus) == 0) + return 0; + fprintf(stderr, "%s exited, status=%d\n", what, WEXITSTATUS(wstatus)); + return WEXITSTATUS(wstatus); + } else if (WIFSIGNALED(wstatus)) { + xerror("%s killed by signal %d\n", what, WTERMSIG(wstatus)); + } else if (WIFSTOPPED(wstatus)) { + xerror("%s stopped by signal %d\n", what, WSTOPSIG(wstatus)); + } + + return 111; +} + +int main(int argc, char *argv[]) +{ + int e1, e2, wstatus; + pid_t s, c, ret; + int unixfds[2]; + + parse_opts(argc, argv); + + e1 = socketpair(AF_UNIX, SOCK_DGRAM, 0, unixfds); + if (e1 < 0) + die_perror("pipe"); + + s = xfork(); + if (s == 0) + return server(unixfds[1]); + + close(unixfds[1]); + + /* wait until server bound a socket */ + e1 = read(unixfds[0], &e1, 4); + assert(e1 == 4); + + c = xfork(); + if 
(c == 0) + return client(unixfds[0]); + + close(unixfds[0]); + + ret = waitpid(s, &wstatus, 0); + if (ret == -1) + die_perror("waitpid"); + e1 = rcheck(wstatus, "server"); + ret = waitpid(c, &wstatus, 0); + if (ret == -1) + die_perror("waitpid"); + e2 = rcheck(wstatus, "client"); + + return e1 ? e1 : e2; +} diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c index 417b11cafafe..ac9a4d9c1764 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c @@ -4,6 +4,7 @@ #include <assert.h> #include <errno.h> +#include <fcntl.h> #include <limits.h> #include <string.h> #include <stdarg.h> @@ -13,6 +14,7 @@ #include <stdio.h> #include <stdlib.h> #include <strings.h> +#include <time.h> #include <unistd.h> #include <sys/socket.h> @@ -594,6 +596,44 @@ static int server(int pipefd) return 0; } +static void test_ip_tos_sockopt(int fd) +{ + uint8_t tos_in, tos_out; + socklen_t s; + int r; + + tos_in = rand() & 0xfc; + r = setsockopt(fd, SOL_IP, IP_TOS, &tos_in, sizeof(tos_out)); + if (r != 0) + die_perror("setsockopt IP_TOS"); + + tos_out = 0; + s = sizeof(tos_out); + r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s); + if (r != 0) + die_perror("getsockopt IP_TOS"); + + if (tos_in != tos_out) + xerror("tos %x != %x socklen_t %d\n", tos_in, tos_out, s); + + if (s != 1) + xerror("tos should be 1 byte"); + + s = 0; + r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s); + if (r != 0) + die_perror("getsockopt IP_TOS 0"); + if (s != 0) + xerror("expect socklen_t == 0"); + + s = -1; + r = getsockopt(fd, SOL_IP, IP_TOS, &tos_out, &s); + if (r != -1 && errno != EINVAL) + die_perror("getsockopt IP_TOS did not indicate -EINVAL"); + if (s != -1) + xerror("expect socklen_t == -1"); +} + static int client(int pipefd) { int fd = -1; @@ -611,6 +651,8 @@ static int client(int pipefd) xerror("Unknown pf %d\n", pf); } + test_ip_tos_sockopt(fd); + connect_one_server(fd, pipefd); return 0; @@ -642,6 +684,25 @@ static int rcheck(int wstatus, const char *what) return 111; } +static void init_rng(void) +{ + int fd = open("/dev/urandom", O_RDONLY); + + if (fd >= 0) { + unsigned int foo; + ssize_t ret; + + /* can't fail */ + ret = read(fd, &foo, sizeof(foo)); + assert(ret == sizeof(foo)); + + close(fd); + srand(foo); + } else { + srand(time(NULL)); + } +} + int main(int argc, char *argv[]) { int e1, e2, wstatus; @@ -650,6 +711,8 @@ int main(int argc, char *argv[]) parse_opts(argc, argv); + init_rng(); + e1 = pipe(pipefds); if (e1 < 0) die_perror("pipe"); diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh index 41de643788b8..0879da915014 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh @@ -178,7 +178,7 @@ do_transfer() timeout ${timeout_test} \ ip netns exec ${listener_ns} \ - $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c TIMESTAMPNS \ + $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c TIMESTAMPNS,TCPINQ \ ${local_addr} < "$sin" > "$sout" & spid=$! @@ -186,7 +186,7 @@ do_transfer() timeout ${timeout_test} \ ip netns exec ${connector_ns} \ - $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c TIMESTAMPNS \ + $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c TIMESTAMPNS,TCPINQ \ $connect_addr < "$cin" > "$cout" & cpid=$! 
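The mptcp_inq program and the TCPINQ cmsg mode added to mptcp_connect above both rely on the same kernel interface: once TCP_INQ is enabled on a socket, every recvmsg() carries a TCP_CM_INQ control message with the number of bytes still queued for reading. A minimal self-contained sketch of that receive path, assuming an already-connected TCP or MPTCP socket (recv_with_inq is a hypothetical helper name, error handling trimmed):

/* Hedged sketch: enable TCP_INQ on a connected socket, receive data and
 * extract the in-queue byte count from the TCP_CM_INQ cmsg, as the
 * selftests above do. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static ssize_t recv_with_inq(int fd, void *buf, size_t len, unsigned int *inq)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int one = 1;
	ssize_t ret;

	/* ask the kernel to report queued bytes on every recvmsg() */
	if (setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one)) < 0)
		return -1;

	ret = recvmsg(fd, &msg, 0);
	if (ret < 0)
		return ret;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == IPPROTO_TCP && cmsg->cmsg_type == TCP_CM_INQ)
			memcpy(inq, CMSG_DATA(cmsg), sizeof(*inq));

	return ret;
}

In real code the setsockopt() would be done once after connect/accept rather than per call; the hint of 1 at EOF (the FIN byte) is the corner case the selftest's expect_eof logic checks.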
@@ -279,6 +279,45 @@ run_tests() fi } +do_tcpinq_test() +{ + ip netns exec "$ns1" ./mptcp_inq "$@" + lret=$? + if [ $lret -ne 0 ];then + ret=$lret + echo "FAIL: mptcp_inq $@" 1>&2 + return $lret + fi + + echo "PASS: TCP_INQ cmsg/ioctl $@" + return $lret +} + +do_tcpinq_tests() +{ + local lret=0 + + ip netns exec "$ns1" iptables -F + ip netns exec "$ns1" ip6tables -F + + for args in "-t tcp" "-r tcp"; do + do_tcpinq_test $args + lret=$? + if [ $lret -ne 0 ] ; then + return $lret + fi + do_tcpinq_test -6 $args + lret=$? + if [ $lret -ne 0 ] ; then + return $lret + fi + done + + do_tcpinq_test -r tcp -t tcp + + return $? +} + sin=$(mktemp) sout=$(mktemp) cin=$(mktemp) @@ -300,4 +339,5 @@ if [ $ret -eq 0 ];then echo "PASS: SOL_MPTCP getsockopt has expected information" fi +do_tcpinq_tests exit $ret diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 8a22db0cca49..6e468e0f42f7 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -31,6 +31,8 @@ struct tls_crypto_info_keys { struct tls12_crypto_info_chacha20_poly1305 chacha20; struct tls12_crypto_info_sm4_gcm sm4gcm; struct tls12_crypto_info_sm4_ccm sm4ccm; + struct tls12_crypto_info_aes_ccm_128 aesccm128; + struct tls12_crypto_info_aes_gcm_256 aesgcm256; }; size_t len; }; @@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type, tls12->sm4ccm.info.version = tls_version; tls12->sm4ccm.info.cipher_type = cipher_type; break; + case TLS_CIPHER_AES_CCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128); + tls12->aesccm128.info.version = tls_version; + tls12->aesccm128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_GCM_256: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256); + tls12->aesgcm256.info.version = tls_version; + tls12->aesgcm256.info.cipher_type = cipher_type; + break; default: break; } @@ -261,6 +273,30 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm) .cipher_type = TLS_CIPHER_SM4_CCM, }; +FIXTURE_VARIANT_ADD(tls, 12_aes_ccm) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_ccm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + FIXTURE_SETUP(tls) { struct tls_crypto_info_keys tls12; diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh index 91f3ef0f1192..8b5ea9234588 100755 --- a/tools/testing/selftests/netfilter/conntrack_vrf.sh +++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh @@ -150,11 +150,27 @@ EOF # oifname is the vrf device. 
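The tls.c additions above extend the selftest's cipher matrix to AES-CCM-128 and AES-GCM-256. For context, programming one of these ciphers on a kTLS socket follows the usual two-step setsockopt sequence; the following is a hedged sketch, not the selftest's own helper, with zeroed key material for illustration and #ifndef guards in case older libc headers lack the constants:

/* Hedged sketch: attach the kTLS ULP to an established TCP socket and
 * program an AES-GCM-256 TX key, one of the ciphers the tls.c hunk above
 * adds coverage for. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_ktls_aes_gcm_256(int sk)
{
	struct tls12_crypto_info_aes_gcm_256 ci;

	/* hand the connection over to the TLS upper-layer protocol */
	if (setsockopt(sk, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_3_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_256;
	/* real code fills ci.key/ci.iv/ci.salt/ci.rec_seq from the handshake */

	return setsockopt(sk, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}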
test_masquerade_vrf() { + local qdisc=$1 + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc add dev tvrf root $qdisc + fi + ip netns exec $ns0 conntrack -F 2>/dev/null ip netns exec $ns0 nft -f - <<EOF flush ruleset table ip nat { + chain rawout { + type filter hook output priority raw; + + oif tvrf ct state untracked counter + } + chain postrouting2 { + type filter hook postrouting priority mangle; + + oif tvrf ct state untracked counter + } chain postrouting { type nat hook postrouting priority 0; # NB: masquerade should always be combined with 'oif(name) bla', @@ -171,13 +187,18 @@ EOF fi # must also check that nat table was evaluated on second (lower device) iteration. - ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' && + ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]' if [ $? -eq 0 ]; then - echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device" + echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)" else - echo "FAIL: vrf masq rule has unexpected counter value" + echo "FAIL: vrf rules have unexpected counter value" ret=1 fi + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc del dev tvrf root + fi } # add masq rule that gets evaluated w. outif set to veth device. @@ -213,7 +234,8 @@ EOF } test_ct_zone_in -test_masquerade_vrf +test_masquerade_vrf "default" +test_masquerade_vrf "pfifo" test_masquerade_veth exit $ret diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index 5a4938d6dcf2..ed61f6cab60f 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout" # Set types, defined by TYPE_ variables below TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto - net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port - net_port_mac_proto_net" + net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp + net6_port_net6_port net_port_mac_proto_net" # Reported bugs, also described by TYPE_ variables below BUGS="flush_remove_add" @@ -277,6 +277,23 @@ perf_entries 1000 perf_proto ipv4 " +TYPE_mac_net=" +display mac,net +type_spec ether_addr . ipv4_addr +chain_spec ether saddr . ip saddr +dst +src mac addr4 +start 1 +count 5 +src_delta 2000 +tools sendip nc bash +proto udp + +race_repeat 0 + +perf_duration 0 +" + TYPE_net_mac_icmp=" display net,mac - ICMP type_spec ipv4_addr . ether_addr @@ -984,7 +1001,8 @@ format() { fi done for f in ${src}; do - __expr="${__expr} . " + [ "${__expr}" != "{ " ] && __expr="${__expr} . " + __start="$(eval format_"${f}" "${srcstart}")" __end="$(eval format_"${f}" "${srcend}")" diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh index ac646376eb01..04633119b29a 100755 --- a/tools/testing/selftests/netfilter/nft_zones_many.sh +++ b/tools/testing/selftests/netfilter/nft_zones_many.sh @@ -18,11 +18,17 @@ cleanup() ip netns del $ns } -ip netns add $ns -if [ $? -ne 0 ];then - echo "SKIP: Could not create net namespace $gw" - exit $ksft_skip -fi +checktool (){ + if ! 
$1 > /dev/null 2>&1; then + echo "SKIP: Could not $2" + exit $ksft_skip + fi +} + +checktool "nft --version" "run test without nft tool" +checktool "ip -Version" "run test without ip tool" +checktool "socat -V" "run test without socat tool" +checktool "ip netns add $ns" "create net namespace" trap cleanup EXIT @@ -71,7 +77,8 @@ EOF local start=$(date +%s%3N) i=$((i + 10000)) j=$((j + 1)) - dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null + # nft rule in output places each packet in a different zone. + dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345 if [ $? -ne 0 ] ;then ret=1 break diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config index b71828df5a6d..a3239d5e40c7 100644 --- a/tools/testing/selftests/tc-testing/config +++ b/tools/testing/selftests/tc-testing/config @@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_SCH_FIFO=y CONFIG_NET_SCH_ETS=m CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NETDEVSIM=m # ## Network testing diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index a3e43189d940..ee22e3447ec7 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining): list_test_cases(alltests) exit(0) + exit_code = 0 # KSFT_PASS if len(alltests): req_plugins = pm.get_required_plugins(alltests) try: @@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining): print('The following plugins were not found:') print('{}'.format(pde.missing_pg)) catresults = test_runner(pm, args, alltests) + if catresults.count_failures() != 0: + exit_code = 1 # KSFT_FAIL if args.format == 'none': print('Test results output suppression requested\n') else: @@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining): gid=int(os.getenv('SUDO_GID'))) else: print('No tests found\n') + exit_code = 4 # KSFT_SKIP + exit(exit_code) def main(): """ @@ -767,8 +772,5 @@ def main(): set_operation_mode(pm, parser, args, remaining) - exit(0) - - if __name__ == "__main__": main() diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh index 7fe38c76db44..afb0cd86fa3d 100755 --- a/tools/testing/selftests/tc-testing/tdc.sh +++ b/tools/testing/selftests/tc-testing/tdc.sh @@ -1,5 +1,6 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 +modprobe netdevsim ./tdc.py -c actions --nobuildebpf ./tdc.py -c qdisc diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh index ebc4ee0fe179..8a9461aa0878 100755 --- a/tools/testing/selftests/wireguard/netns.sh +++ b/tools/testing/selftests/wireguard/netns.sh @@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2 n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 ip2 link del wg0 ip2 link del wg1 -! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel +read _ _ tx_bytes_before < <(n0 wg show wg1 transfer) +! 
n0 ping -W 1 -c 10 -f 192.168.241.2 || false +sleep 1 +read _ _ tx_bytes_after < <(n0 wg show wg1 transfer) +(( tx_bytes_after - tx_bytes_before < 70000 )) ip0 link del wg1 ip1 link del wg0 @@ -609,6 +613,28 @@ ip0 link set wg0 up kill $ncat_pid ip0 link del wg0 +# Ensure that dst_cache references don't outlive netns lifetime +ip1 link add dev wg0 type wireguard +ip2 link add dev wg0 type wireguard +configure_peers +ip1 link add veth1 type veth peer name veth2 +ip1 link set veth2 netns $netns2 +ip1 addr add fd00:aa::1/64 dev veth1 +ip2 addr add fd00:aa::2/64 dev veth2 +ip1 link set veth1 up +ip2 link set veth2 up +waitiface $netns1 veth1 +waitiface $netns2 veth2 +ip1 -6 route add default dev veth1 via fd00:aa::2 +ip2 -6 route add default dev veth2 via fd00:aa::1 +n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2 +n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1 +n1 ping6 -c 1 fd00::2 +pp ip netns delete $netns1 +pp ip netns delete $netns2 +pp ip netns add $netns1 +pp ip netns add $netns2 + # Ensure there aren't circular reference loops ip1 link add wg1 type wireguard ip2 link add wg2 type wireguard @@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do done < /dev/kmsg alldeleted=1 for object in "${!objects[@]}"; do - if [[ ${objects["$object"]} != *createddestroyed ]]; then + if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then echo "Error: $object: merely ${objects["$object"]}" >&3 alldeleted=0 fi diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config index fe07d97df9fa..2b321b8a96cf 100644 --- a/tools/testing/selftests/wireguard/qemu/debug.config +++ b/tools/testing/selftests/wireguard/qemu/debug.config @@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_TRACE_IRQFLAGS=y CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PI_LIST=y +CONFIG_DEBUG_PLIST=y CONFIG_PROVE_RCU=y CONFIG_SPARSE_RCU_POINTER=y CONFIG_RCU_CPU_STALL_TIMEOUT=21 diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config index 74db83a0aedd..a9b5a520a1d2 100644 --- a/tools/testing/selftests/wireguard/qemu/kernel.config +++ b/tools/testing/selftests/wireguard/qemu/kernel.config @@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 +CONFIG_LOG_BUF_SHIFT=18 CONFIG_PRINTK_TIME=y CONFIG_BLK_DEV_INITRD=y CONFIG_LEGACY_VSYSCALL_NONE=y diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9646bb9112c1..72c4e6b39389 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, static int kvm_set_memslot(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot *old, struct kvm_memory_slot *new, int as_id, enum kvm_mr_change change) { - struct kvm_memory_slot *slot; + struct kvm_memory_slot *slot, old; struct kvm_memslots *slots; int r; @@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm, * Note, the INVALID flag needs to be in the appropriate entry * in the freshly allocated memslots, not in @old or @new. 
*/ - slot = id_to_memslot(slots, old->id); + slot = id_to_memslot(slots, new->id); slot->flags |= KVM_MEMSLOT_INVALID; /* @@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm, kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); } + /* + * Make a full copy of the old memslot, the pointer will become stale + * when the memslots are re-sorted by update_memslots(), and the old + * memslot needs to be referenced after calling update_memslots(), e.g. + * to free its resources and for arch specific behavior. This needs to + * happen *after* (re)acquiring slots_arch_lock. + */ + slot = id_to_memslot(slots, new->id); + if (slot) { + old = *slot; + } else { + WARN_ON_ONCE(change != KVM_MR_CREATE); + memset(&old, 0, sizeof(old)); + old.id = new->id; + old.as_id = as_id; + } + + /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */ + memcpy(&new->arch, &old.arch, sizeof(old.arch)); + r = kvm_arch_prepare_memory_region(kvm, new, mem, change); if (r) goto out_slots; @@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm, update_memslots(slots, new, change); slots = install_new_memslots(kvm, as_id, slots); - kvm_arch_commit_memory_region(kvm, mem, old, new, change); + kvm_arch_commit_memory_region(kvm, mem, &old, new, change); + + /* Free the old memslot's metadata. Note, this is the full copy!!! */ + if (change == KVM_MR_DELETE) + kvm_free_memslot(kvm, &old); kvfree(slots); return 0; out_slots: if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { - slot = id_to_memslot(slots, old->id); + slot = id_to_memslot(slots, new->id); slot->flags &= ~KVM_MEMSLOT_INVALID; slots = install_new_memslots(kvm, as_id, slots); } else { @@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm, struct kvm_memory_slot *old, int as_id) { struct kvm_memory_slot new; - int r; if (!old->npages) return -EINVAL; @@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm, */ new.as_id = as_id; - r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); - if (r) - return r; - - kvm_free_memslot(kvm, old); - return 0; + return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); } /* @@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm, id = (u16)mem->slot; /* General sanity checks */ - if (mem->memory_size & (PAGE_SIZE - 1)) + if ((mem->memory_size & (PAGE_SIZE - 1)) || + (mem->memory_size != (unsigned long)mem->memory_size)) return -EINVAL; if (mem->guest_phys_addr & (PAGE_SIZE - 1)) return -EINVAL; @@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!old.npages) { change = KVM_MR_CREATE; new.dirty_bitmap = NULL; - memset(&new.arch, 0, sizeof(new.arch)); } else { /* Modify an existing slot. */ if ((new.userspace_addr != old.userspace_addr) || (new.npages != old.npages) || @@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm, else /* Nothing to change. */ return 0; - /* Copy dirty_bitmap and arch from the current memslot. */ + /* Copy dirty_bitmap from the current memslot. 
*/ new.dirty_bitmap = old.dirty_bitmap; - memcpy(&new.arch, &old.arch, sizeof(new.arch)); } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { @@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm, bitmap_set(new.dirty_bitmap, 0, new.npages); } - r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); + r = kvm_set_memslot(kvm, mem, &new, as_id, change); if (r) goto out_bitmap; @@ -2915,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int r; gpa_t gpa = ghc->gpa + offset; - BUG_ON(len + offset > ghc->len); + if (WARN_ON_ONCE(len + offset > ghc->len)) + return -EINVAL; if (slots->generation != ghc->generation) { if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) @@ -2952,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int r; gpa_t gpa = ghc->gpa + offset; - BUG_ON(len + offset > ghc->len); + if (WARN_ON_ONCE(len + offset > ghc->len)) + return -EINVAL; if (slots->generation != ghc->generation) { if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |
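The kvm_main.c rework above drops the caller-supplied @old pointer because update_memslots() re-sorts the memslot array, leaving any pointer into it stale; kvm_set_memslot() now takes a by-value snapshot after (re)acquiring slots_arch_lock and frees the deleted slot through that copy. Reduced to a hedged standalone sketch of the same copy-before-resort pattern (the slot struct and qsort stand in for KVM's memslot machinery and are illustrative only):

/* Hedged sketch: snapshot an array element by value before an operation
 * that reorders the array, since reordering leaves pointers into the
 * array referring to the wrong entry. */
#include <stdio.h>
#include <stdlib.h>

struct slot { int id; unsigned long npages; };

static int cmp_npages(const void *a, const void *b)
{
	const struct slot *x = a, *y = b;

	return (x->npages > y->npages) - (x->npages < y->npages);
}

int main(void)
{
	struct slot slots[] = { { 0, 30 }, { 1, 10 }, { 2, 20 } };
	struct slot *p = &slots[0];	/* points at id 0 before the sort */
	struct slot old = *p;		/* full copy, as in kvm_set_memslot() */

	qsort(slots, 3, sizeof(slots[0]), cmp_npages);

	/* p now names whichever slot sorted first; old still holds id 0 */
	printf("old.id=%d p->id=%d\n", old.id, p->id);
	return 0;
}

The same reasoning explains the memcpy of new->arch from the snapshot: the arch data must come from the slot as it exists after slots_arch_lock is reacquired, not from a copy taken earlier.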